* When this lock is held the pointers can't change, ONLINE and
* OFFLINE blocks can't change the state and no subblocks will get
* plugged/unplugged.
+ *
+ * In kdump mode, used to serialize device requests and accesses to
+ * last_block_addr and last_block_plugged.
*/
struct mutex hotplug_mutex;
bool hotplug_active;
/* An error occurred we cannot handle - stop processing requests. */
bool broken;
+	/* Cached value of is_kdump_kernel() when the device was probed. */
+ bool in_kdump;
+
/* The driver is being removed. */
spinlock_t removal_lock;
bool removing;
/* Memory notifier (online/offline events). */
struct notifier_block memory_notifier;
+#ifdef CONFIG_PROC_VMCORE
+ /* vmcore callback for /proc/vmcore handling in kdump mode */
+ struct vmcore_cb vmcore_cb;
+ uint64_t last_block_addr;
+ bool last_block_plugged;
+#endif /* CONFIG_PROC_VMCORE */
+
/* Next device in the list of virtio-mem devices. */
struct list_head next;
};
uint64_t diff;
int rc;
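+	/*
+	 * In kdump mode the workqueue is never queued (see virtio_mem_probe()
+	 * and virtio_mem_config_changed()); bail out loudly if we end up here
+	 * regardless.
+	 */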
+ if (unlikely(vm->in_kdump)) {
+ dev_warn_once(&vm->vdev->dev,
+ "unexpected workqueue run in kdump kernel\n");
+ return;
+ }
+
hrtimer_cancel(&vm->retry_timer);
if (vm->broken)
		return;
}
+#ifdef CONFIG_PROC_VMCORE
+static int virtio_mem_send_state_request(struct virtio_mem *vm, uint64_t addr,
+ uint64_t size)
+{
+ const uint64_t nb_vm_blocks = size / vm->device_block_size;
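+	/*
+	 * nb_blocks is a 16-bit field; our only caller queries a single
+	 * device block at a time.
+	 */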
+ const struct virtio_mem_req req = {
+ .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_STATE),
+ .u.state.addr = cpu_to_virtio64(vm->vdev, addr),
+ .u.state.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
+ };
+ int rc = -ENOMEM;
+
+ dev_dbg(&vm->vdev->dev, "requesting state: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+
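+	/*
+	 * Only an ACK response carries a usable block state; any other
+	 * response, or a failed request, is reported as an error.
+	 */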
+ switch (virtio_mem_send_request(vm, &req)) {
+ case VIRTIO_MEM_RESP_ACK:
+ return virtio16_to_cpu(vm->vdev, vm->resp.u.state.state);
+ case VIRTIO_MEM_RESP_ERROR:
+ rc = -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(&vm->vdev->dev, "requesting state failed: %d\n", rc);
+ return rc;
+}
+
+static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
+ unsigned long pfn)
+{
+ struct virtio_mem *vm = container_of(cb, struct virtio_mem,
+ vmcore_cb);
+ uint64_t addr = PFN_PHYS(pfn);
+ bool is_ram;
+ int rc;
+
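+	/*
+	 * PFNs outside the device-managed range are not ours to filter:
+	 * report them as RAM. With nothing plugged, any PFN inside the
+	 * range is certainly unplugged.
+	 */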
+ if (!virtio_mem_contains_range(vm, addr, PAGE_SIZE))
+ return true;
+ if (!vm->plugged_size)
+ return false;
+
+ /*
+ * We have to serialize device requests and access to the information
+ * about the block queried last.
+ */
+ mutex_lock(&vm->hotplug_mutex);
+
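+	/*
+	 * Cache the state of the device block queried last: /proc/vmcore is
+	 * typically read sequentially, so consecutive PFNs usually fall into
+	 * the same device block and only the first access triggers a request.
+	 */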
+ addr = ALIGN_DOWN(addr, vm->device_block_size);
+ if (addr != vm->last_block_addr) {
+ rc = virtio_mem_send_state_request(vm, addr,
+ vm->device_block_size);
+ /* On any kind of error, we're going to signal !ram. */
+ if (rc == VIRTIO_MEM_STATE_PLUGGED)
+ vm->last_block_plugged = true;
+ else
+ vm->last_block_plugged = false;
+ vm->last_block_addr = addr;
+ }
+
+ is_ram = vm->last_block_plugged;
+ mutex_unlock(&vm->hotplug_mutex);
+ return is_ram;
+}
+#endif /* CONFIG_PROC_VMCORE */
+
+static int virtio_mem_init_kdump(struct virtio_mem *vm)
+{
+#ifdef CONFIG_PROC_VMCORE
+ dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
+ vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
+ register_vmcore_cb(&vm->vmcore_cb);
+ return 0;
+#else /* CONFIG_PROC_VMCORE */
+ dev_warn(&vm->vdev->dev, "disabled in kdump kernel without vmcore\n");
+ return -EBUSY;
+#endif /* CONFIG_PROC_VMCORE */
+}
+
static int virtio_mem_init(struct virtio_mem *vm)
{
uint16_t node_id;
return -EINVAL;
}
- /*
- * We don't want to (un)plug or reuse any memory when in kdump. The
- * memory is still accessible (but not mapped).
- */
- if (is_kdump_kernel()) {
- dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
- return -EBUSY;
- }
-
/* Fetch all properties that can't change. */
virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
&vm->plugged_size);
if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
+ /*
+ * We don't want to (un)plug or reuse any memory when in kdump. The
+ * memory is still accessible (but not exposed to Linux).
+ */
+ if (vm->in_kdump)
+ return virtio_mem_init_kdump(vm);
return virtio_mem_init_hotplug(vm);
}
hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vm->retry_timer.function = virtio_mem_timer_expired;
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
+ vm->in_kdump = is_kdump_kernel();
/* register the virtqueue */
rc = virtio_mem_init_vq(vm);
virtio_device_ready(vdev);
/* trigger a config update to start processing the requested_size */
- atomic_set(&vm->config_changed, 1);
- queue_work(system_freezable_wq, &vm->wq);
+ if (!vm->in_kdump) {
+ atomic_set(&vm->config_changed, 1);
+ queue_work(system_freezable_wq, &vm->wq);
+ }
return 0;
out_del_vq:
}
}
+static void virtio_mem_deinit_kdump(struct virtio_mem *vm)
+{
+#ifdef CONFIG_PROC_VMCORE
+ unregister_vmcore_cb(&vm->vmcore_cb);
+#endif /* CONFIG_PROC_VMCORE */
+}
+
static void virtio_mem_remove(struct virtio_device *vdev)
{
struct virtio_mem *vm = vdev->priv;
- virtio_mem_deinit_hotplug(vm);
+ if (vm->in_kdump)
+ virtio_mem_deinit_kdump(vm);
+ else
+ virtio_mem_deinit_hotplug(vm);
/* reset the device and cleanup the queues */
vdev->config->reset(vdev);
{
struct virtio_mem *vm = vdev->priv;
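+	/*
+	 * In kdump mode we neither plug nor unplug memory: ignore any
+	 * resize requests.
+	 */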
+ if (unlikely(vm->in_kdump))
+ return;
+
atomic_set(&vm->config_changed, 1);
virtio_mem_retry(vm);
}