diff --git a/src/core/hypercall.c b/src/core/hypercall.c index e8eb6e67..afc49280 100644 --- a/src/core/hypercall.c +++ b/src/core/hypercall.c @@ -20,6 +20,9 @@ long int hypercall(unsigned long id) case HC_IPC: ret = ipc_hypercall(arg0, arg1, arg2); break; + case HC_REMOTE_IO: + ret = remote_io_hypercall(arg0, arg1, arg2); + break; default: WARNING("Unknown hypercall id %d", id); } diff --git a/src/core/inc/hypercall.h b/src/core/inc/hypercall.h index cb510c47..58331301 100644 --- a/src/core/inc/hypercall.h +++ b/src/core/inc/hypercall.h @@ -9,7 +9,7 @@ #include #include -enum { HC_INVAL = 0, HC_IPC = 1 }; +enum { HC_INVAL = 0, HC_IPC = 1, HC_REMOTE_IO = 2 }; enum { HC_E_SUCCESS = 0, HC_E_FAILURE = 1, HC_E_INVAL_ID = 2, HC_E_INVAL_ARGS = 3 }; diff --git a/src/core/inc/remote_io.h b/src/core/inc/remote_io.h new file mode 100644 index 00000000..8578b1ce --- /dev/null +++ b/src/core/inc/remote_io.h @@ -0,0 +1,77 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +/** + * @file remote_io.h + * @brief This header file contains the Remote I/O device interface + */ + +#ifndef __REMOTE_IO_H__ +#define __REMOTE_IO_H__ + +#include +#include +#include +#include + +/** + * @struct remote_io_shmem + * @brief This structure represents a shared memory region used by a Remote I/O device + */ +struct remote_io_shmem { + paddr_t base; /**< Shared memory base address */ + size_t size; /**< Shared memory size */ + size_t shmem_id; /**< Shared memory ID */ +}; + +/** + * @struct remote_io_dev + * @brief This structure represents a Remote I/O device + * @note The device can be either a frontend (driver) or a backend (device) + */ +struct remote_io_dev { + vaddr_t va; /**< Frontend MMIO base virtual address */ + size_t size; /**< Frontend MMIO size */ + irqid_t interrupt; /**< Frontend/backend interrupt number */ + uint64_t id; /**< Remote I/O ID */ + bool is_backend; /**< True if the device is a backend */ + struct remote_io_shmem shmem; /**< Shared memory region */ +}; + +/** + * @brief Remote I/O device initialization routine + * @note Executed only once by the master CPU + */ +void remote_io_init(void); + +/** + * @brief Remote I/O device CPU assignment routine + * @note Executed by each VM that holds a Remote I/O device, it is responsible for + * assigning the frontend or backend CPU ID for the respective Remote I/O device. + * If the VM was allocated with more than one CPU, the assigned CPU will be the + * one with the lowest ID, since only one CPU is required for VM interrupt injection + * @param vm Pointer to the VM structure + */ +void remote_io_assign_cpus(struct vm* vm); + +/** + * @brief Remote I/O hypercall callback + * @note Used to exchange information between the Remote I/O system and the backend VM + * @param arg0 First argument of the hypercall + * @param arg1 Second argument of the hypercall + * @param arg2 Third argument of the hypercall + * @return Returns the number of pending I/O requests + */ +long int remote_io_hypercall(unsigned long arg0, unsigned long arg1, unsigned long arg2); + +/** + * @brief Remote I/O MMIO emulation handler + * @note Executed by the frontend VM when an MMIO access is performed + * @param emul_access Holds the information about the MMIO access + * @return Returns true if handled successfully, false otherwise + */ +bool remote_io_mmio_emul_handler(struct emul_access* emul_access); + +#endif /* __REMOTE_IO_H__ */ diff --git a/src/core/inc/vm.h b/src/core/inc/vm.h index a2177cd9..06a33f70 100644
--- a/src/core/inc/vm.h +++ b/src/core/inc/vm.h @@ -17,6 +17,7 @@ #include #include #include +#include struct vm_mem_region { paddr_t base; @@ -47,6 +48,9 @@ struct vm_platform { size_t dev_num; struct vm_dev_region* devs; + size_t remote_io_dev_num; + struct remote_io_dev* remote_io_devs; + // /** // * In MPU-based platforms which might also support virtual memory // * (i.e. aarch64 cortex-r) the hypervisor sets up the VM using an MPU by @@ -84,6 +88,9 @@ struct vm { size_t ipc_num; struct ipc* ipcs; + + size_t remote_io_dev_num; + struct remote_io_dev* remote_io_devs; }; struct vcpu { diff --git a/src/core/objects.mk b/src/core/objects.mk index 89cbdaa4..c6cf650e 100644 --- a/src/core/objects.mk +++ b/src/core/objects.mk @@ -14,3 +14,4 @@ core-objs-y+=ipc.o core-objs-y+=objpool.o core-objs-y+=hypercall.o core-objs-y+=shmem.o +core-objs-y+=remote_io.o diff --git a/src/core/remote_io.c b/src/core/remote_io.c new file mode 100644 index 00000000..68fdd3b8 --- /dev/null +++ b/src/core/remote_io.c @@ -0,0 +1,513 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +/** + * @file remote_io.c + * @brief This source file contains the Remote I/O implementation + */ + +#include +#include +#include +#include +#include +#include +#include + +#define REMOTE_IO_MAX_DEVICES 32 +#define REMOTE_IO_DEVICE_UNINITIALIZED -1 +#define REMOTE_IO_CPU_NUM PLAT_CPU_NUM +#define REMOTE_IO_VCPU_NUM PLAT_CPU_NUM + +/** + * @enum REMOTE_IO_HYP_EVENT + * @brief This enum represents the Remote I/O hypercall events + * @note Used by the backend VM to specify the operation to be performed + */ +enum REMOTE_IO_HYP_EVENT { + REMOTE_IO_HYP_WRITE, /**< Write operation */ + REMOTE_IO_HYP_READ, /**< Read operation */ + REMOTE_IO_HYP_ASK, /**< Ask operation (used to request a new pending I/O request) */ + REMOTE_IO_HYP_NOTIFY, /**< Notify operation (used-buffer or configuration-change notification) */ +}; + +/** + * @enum REMOTE_IO_CPU_MSG_EVENT + * @brief This enum represents the Remote I/O CPU message events + */ +enum REMOTE_IO_CPU_MSG_EVENT { + REMOTE_IO_CPU_MSG_WRITE, /**< Write notification */ + REMOTE_IO_CPU_MSG_READ, /**< Read notification */ + REMOTE_IO_CPU_MSG_NOTIFY, /**< Notify notification (used to inject an interrupt into the + frontend or backend VM) */ +}; + +/** + * @enum REMOTE_IO_STATE + * @brief This enum represents the I/O request states + */ +enum REMOTE_IO_STATE { + REMOTE_IO_STATE_FREE, /**< The I/O request slot is free */ + REMOTE_IO_STATE_PENDING, /**< The I/O request is pending to be processed by the backend VM */ + REMOTE_IO_STATE_PROCESSING, /**< The I/O request is being processed by the backend VM */ + REMOTE_IO_STATE_COMPLETE, /**< The I/O request was completed by the backend VM but not yet + observed as complete by the frontend VM */ +}; + +/** + * @union remote_io_cpu_msg_data + * @brief This union represents the Remote I/O CPU message data + */ +union remote_io_cpu_msg_data { + struct { + uint8_t id; /**< Remote I/O ID */ + uint8_t cpu_id; /**< Frontend CPU ID */ + uint8_t vcpu_id; /**< Frontend vCPU ID */ + uint8_t interrupt; /**< Interrupt ID */ + }; + uint64_t raw; /**< Raw data */ +}; + +/** + * @struct remote_io_request + * @brief This structure contains the information of a Remote I/O request + */ +struct remote_io_request { + vaddr_t addr; /**< Address of the accessed MMIO register */ + unsigned long access_width; /**< Access width */ + unsigned long op; /**< MMIO operation type (read or write) */ + unsigned long value; /**< Value to be
written or read */ + unsigned long reg; /**< vCPU register used during the MMIO access */ + enum REMOTE_IO_STATE state; /**< I/O request state */ +}; + +/** + * @struct remote_io_request_event + * @brief This structure contains the information of a Remote I/O request event + * @note This is used only to notify the backend VM that a new I/O request is available + * so that the backend VM can ask for it, avoiding the need to iterate over all the + * CPU and vCPU slots to find the next I/O request to be processed + */ +struct remote_io_request_event { + node_t node; /**< Node */ + cpuid_t cpu_id; /**< CPU ID of the frontend VM that issued the I/O request */ + vcpuid_t vcpu_id; /**< vCPU ID of the frontend VM that issued the I/O request */ +}; + +/** + * @struct remote_io_device_config + * @brief This structure holds the static information regarding a Remote I/O device + */ +struct remote_io_device_config { + cpuid_t backend_cpu_id; /**< Backend VM CPU ID */ + vmid_t backend_vm_id; /**< Backend VM ID */ + irqid_t backend_interrupt; /**< Backend interrupt ID */ + cpuid_t frontend_cpu_id; /**< Frontend VM CPU ID */ + vmid_t frontend_vm_id; /**< Frontend VM ID */ + irqid_t frontend_interrupt; /**< Frontend interrupt ID */ +}; + +/** + * @struct remote_io_device + * @brief This structure comprises all the information needed about a Remote I/O device + */ +struct remote_io_device { + node_t node; /**< Node */ + uint64_t id; /**< Remote I/O device ID */ + struct remote_io_device_config config; /**< Remote I/O device configuration */ + struct list request_event_list; /**< List of pending I/O request events */ +}; + +/** List of Remote I/O devices */ +struct list remote_io_device_list; + +/** Array of Remote I/O requests */ +struct remote_io_request remote_io_requests[REMOTE_IO_CPU_NUM][REMOTE_IO_VCPU_NUM]; + +/** Spinlock to protect the access to the Remote I/O request array */ +spinlock_t remote_io_request_lock = SPINLOCK_INITVAL; + +/** + * @brief Remote I/O CPU message handler + * @param event Message event (REMOTE_IO_CPU_MSG_*) + * @param data Remote I/O CPU message data (remote_io_cpu_msg_data) + */ +static void remote_io_cpu_handler(uint32_t event, uint64_t data); + +/** Associate the Remote I/O CPU message handler with a new Remote I/O CPU message ID */ +CPU_MSG_HANDLER(remote_io_cpu_handler, REMOTE_IO_CPUMSG_ID) + +/** Object pool to allocate Remote I/O devices */ +OBJPOOL_ALLOC(remote_io_device_pool, struct remote_io_device, sizeof(struct remote_io_device)); + +/** Object pool to allocate pending Remote I/O request events */ +OBJPOOL_ALLOC(remote_io_request_event_pool, struct remote_io_request_event, + sizeof(struct remote_io_request_event)); + +/** Auxiliary macro to get the minimum value between two numbers */ +#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) + +void remote_io_init(void) +{ + size_t i, vm_id, frontend_cnt = 0, backend_cnt = 0; + int devices[REMOTE_IO_MAX_DEVICES][2]; + + /** Only execute the Remote I/O initialization routine on the master CPU */ + if (!cpu_is_master()) { + return; + } + + objpool_init(&remote_io_device_pool); + objpool_init(&remote_io_request_event_pool); + list_init(&remote_io_device_list); + + for (i = 0; i < REMOTE_IO_MAX_DEVICES; i++) { + devices[i][0] = REMOTE_IO_DEVICE_UNINITIALIZED; + devices[i][1] = REMOTE_IO_DEVICE_UNINITIALIZED; + } + + /** Create the Remote I/O devices based on the VM configuration */ + for (vm_id = 0; vm_id < config.vmlist_size; vm_id++) { + struct vm_config* vm_config = &config.vmlist[vm_id]; + for (i = 0; i < vm_config->platform.remote_io_dev_num; i++) { + struct remote_io_dev* dev = &vm_config->platform.remote_io_devs[i]; + if (devices[dev->id][0] != REMOTE_IO_DEVICE_UNINITIALIZED && dev->is_backend) { + ERROR("Failed to link backend to the frontend, more than one backend was " + "attributed to the Remote I/O device %d", + dev->id); + } + if (devices[dev->id][1] != REMOTE_IO_DEVICE_UNINITIALIZED && !dev->is_backend) { + ERROR("Failed to link backend to the frontend, more than one frontend was " + "attributed to the Remote I/O device %d", + dev->id); + } + if (dev->is_backend) { + struct remote_io_device* node = objpool_alloc(&remote_io_device_pool); + if (node == NULL) { + ERROR("Failed allocating Remote I/O device node"); + } + node->id = dev->id; + list_init(&node->request_event_list); + list_push(&remote_io_device_list, (node_t*)node); + backend_cnt++; + devices[dev->id][0] = (int)vm_id; + } else { + frontend_cnt++; + devices[dev->id][1] = (int)vm_id; + } + } + } + + /** Check if there is a 1-to-1 mapping between a Remote I/O backend and Remote I/O frontend */ + if (backend_cnt != frontend_cnt) { + ERROR("There is no 1-to-1 mapping between a Remote I/O backend and Remote I/O frontend"); + } + + /** Update the Remote I/O device configuration */ + for (vm_id = 0; vm_id < config.vmlist_size; vm_id++) { + struct vm_config* vm_config = &config.vmlist[vm_id]; + for (i = 0; i < vm_config->platform.remote_io_dev_num; i++) { + struct remote_io_dev* dev = &vm_config->platform.remote_io_devs[i]; + list_foreach (remote_io_device_list, struct remote_io_device, io_device) { + if (dev->id == io_device->id) { + if (dev->is_backend) { + io_device->config.backend_vm_id = vm_id; + io_device->config.backend_interrupt = dev->interrupt; + io_device->config.backend_cpu_id = (cpuid_t)-1; + } else { + io_device->config.frontend_vm_id = vm_id; + io_device->config.frontend_interrupt = dev->interrupt; + io_device->config.frontend_cpu_id = (cpuid_t)-1; + } + } + } + } + } + + /** Initialize the Remote I/O requests array */ + for (size_t cpu_idx = 0; cpu_idx < REMOTE_IO_CPU_NUM; cpu_idx++) { + for (size_t vcpu_idx = 0; vcpu_idx < REMOTE_IO_VCPU_NUM; vcpu_idx++) { + remote_io_requests[cpu_idx][vcpu_idx].state = REMOTE_IO_STATE_FREE; + } + } +} + +void remote_io_assign_cpus(struct vm* vm) +{ + list_foreach (remote_io_device_list, struct remote_io_device, io_device) { + if (vm->id == io_device->config.backend_vm_id) { + io_device->config.backend_cpu_id = MIN(io_device->config.backend_cpu_id, cpu()->id); + } else if (vm->id == io_device->config.frontend_vm_id) { + io_device->config.frontend_cpu_id = MIN(io_device->config.frontend_cpu_id, cpu()->id); + } + } +} + +/** + * @brief Performs the write or read operation by updating the request value + * @param id Remote I/O device ID + * @param addr MMIO 
register address + * @param value Value to be written or read + * @param cpu_id CPU ID of the frontend VM that issued the I/O request + * @param vcpu_id vCPU ID of the frontend VM that issued the I/O request + * @return Returns true if the operation was successful, false otherwise + */ +static bool remote_io_w_r_operation(unsigned long id, unsigned long addr, unsigned long value, + unsigned long cpu_id, unsigned long vcpu_id) +{ + list_foreach (remote_io_device_list, struct remote_io_device, io_device) { + if (io_device->id == id) { + spin_lock(&remote_io_request_lock); + struct remote_io_request* node = &remote_io_requests[cpu_id][vcpu_id]; + spin_unlock(&remote_io_request_lock); + + if (node->addr != addr || node->state != REMOTE_IO_STATE_PROCESSING) { + break; + } + + spin_lock(&remote_io_request_lock); + node->value = value; + node->state = REMOTE_IO_STATE_COMPLETE; + spin_unlock(&remote_io_request_lock); + return true; + } + } + return false; +} + +/** + * @brief Performs the post work after the completion of the I/O request + * @note This function is executed by the frontend VM and is responsible for updating the + * vCPU register in case of a read operation and activating the frontend vCPU + * @param event Message event (REMOTE_IO_CPU_MSG_*) + * @param id Remote I/O device ID + * @param cpu_id CPU ID of the frontend VM that issued the I/O request + * @param vcpu_id vCPU ID of the frontend VM that issued the I/O request + */ +static void remote_io_cpu_post_work(uint32_t event, uint8_t id, uint8_t cpu_id, uint8_t vcpu_id) +{ + list_foreach (remote_io_device_list, struct remote_io_device, io_device) { + if (io_device->id == id) { + spin_lock(&remote_io_request_lock); + struct remote_io_request* node = &remote_io_requests[cpu_id][vcpu_id]; + spin_unlock(&remote_io_request_lock); + + switch (event) { + case REMOTE_IO_CPU_MSG_READ: + vcpu_writereg(cpu()->vcpu, node->reg, node->value); + break; + default: + break; + } + + spin_lock(&remote_io_request_lock); + node->state = REMOTE_IO_STATE_FREE; + spin_unlock(&remote_io_request_lock); + + cpu()->vcpu->active = true; + break; + } + } +} + +/** + * @brief Sends a Remote I/O CPU message to the target CPU + * @param event Message event (REMOTE_IO_CPU_MSG_*) + * @param target_cpu Target CPU ID + * @param id Remote I/O device ID + * @param cpu_id CPU ID of the frontend VM that issued the I/O request + * @param vcpu_id vCPU ID of the frontend VM that issued the I/O request + * @param interrupt Interrupt ID + */ +static void remote_io_cpu_send_msg(enum REMOTE_IO_CPU_MSG_EVENT event, unsigned long target_cpu, + unsigned long id, unsigned long cpu_id, unsigned long long vcpu_id, unsigned long interrupt) +{ + union remote_io_cpu_msg_data data = { + .id = (uint8_t)id, + .cpu_id = (uint8_t)cpu_id, + .vcpu_id = (uint8_t)vcpu_id, + .interrupt = (uint8_t)interrupt, + }; + struct cpu_msg msg = { (uint32_t)REMOTE_IO_CPUMSG_ID, event, data.raw }; + cpu_send_msg(target_cpu, &msg); +} + +long int remote_io_hypercall(unsigned long arg0, unsigned long arg1, unsigned long arg2) +{ + long int ret = -HC_E_SUCCESS; + unsigned long virt_remote_io_dev_id = arg0; + unsigned long addr = arg1; + unsigned long op = arg2; + unsigned long value = vcpu_readreg(cpu()->vcpu, HYPCALL_IN_ARG_REG(3)); + unsigned long cpu_id = vcpu_readreg(cpu()->vcpu, HYPCALL_IN_ARG_REG(4)); + unsigned long vcpu_id = vcpu_readreg(cpu()->vcpu, HYPCALL_IN_ARG_REG(5)); + struct remote_io_device* remote_io_dev = NULL; + struct vm* vm = cpu()->vcpu->vm; + + /** Check if the virtual Remote I/O device ID 
is within the valid range */ + if (virt_remote_io_dev_id >= vm->remote_io_dev_num) { + WARNING("Remote I/O ID (%d) exceeds the valid range for backend VM (%d)", + virt_remote_io_dev_id, vm->id); + return -HC_E_FAILURE; + } + + /** Get the absolute Remote I/O device ID based on the virtual Remote I/O device ID */ + unsigned long abs_remote_io_dev_id = vm->remote_io_devs[virt_remote_io_dev_id].id; + + /** Find the Remote I/O device associated with the current backend VM */ + list_foreach (remote_io_device_list, struct remote_io_device, io_device) { + if (abs_remote_io_dev_id == io_device->id && vm->id == io_device->config.backend_vm_id) { + remote_io_dev = io_device; + break; + } + } + + if (!remote_io_dev) { + WARNING("The Remote I/O backend device (%d) is not associated with the current backend VM " + "(%d)", + virt_remote_io_dev_id, vm->id); + return -HC_E_FAILURE; + } + + switch (op) { + case REMOTE_IO_HYP_WRITE: + case REMOTE_IO_HYP_READ: + if (!remote_io_w_r_operation(abs_remote_io_dev_id, addr, value, cpu_id, vcpu_id)) { + ret = -HC_E_FAILURE; + } else { + /** Send a CPU message to the backend VM to execute the post work */ + remote_io_cpu_send_msg(op == REMOTE_IO_HYP_WRITE ? REMOTE_IO_CPU_MSG_WRITE : + REMOTE_IO_CPU_MSG_READ, + cpu_id, abs_remote_io_dev_id, cpu_id, vcpu_id, 0); + } + break; + case REMOTE_IO_HYP_ASK: + /** By convention, the addr and value fields must be zero */ + if (addr != 0 || value != 0) { + ret = -HC_E_FAILURE; + break; + } + + /** Extract the next pending I/O request event from the list */ + struct remote_io_request_event* node = + (struct remote_io_request_event*)list_pop(&remote_io_dev->request_event_list); + + if (!node) { + ret = -HC_E_FAILURE; + break; + } + + /** Calculate the remaining number of pending I/O requests */ + ret = (long int)list_size(&remote_io_dev->request_event_list); + + spin_lock(&remote_io_request_lock); + struct remote_io_request* request = &remote_io_requests[node->cpu_id][node->vcpu_id]; + request->state = REMOTE_IO_STATE_PROCESSING; + spin_unlock(&remote_io_request_lock); + + /** Write the I/O request information to the backend VM's vCPU registers */ + vcpu_writereg(cpu()->vcpu, HYPCALL_OUT_ARG_REG(0), request->addr); + vcpu_writereg(cpu()->vcpu, HYPCALL_OUT_ARG_REG(1), request->op); + vcpu_writereg(cpu()->vcpu, HYPCALL_OUT_ARG_REG(2), request->value); + vcpu_writereg(cpu()->vcpu, HYPCALL_OUT_ARG_REG(3), request->access_width); + vcpu_writereg(cpu()->vcpu, HYPCALL_OUT_ARG_REG(4), node->cpu_id); + vcpu_writereg(cpu()->vcpu, HYPCALL_OUT_ARG_REG(5), node->vcpu_id); + + objpool_free(&remote_io_request_event_pool, node); + break; + case REMOTE_IO_HYP_NOTIFY: + /** Send a CPU message to the frontend VM to inject an interrupt */ + remote_io_cpu_send_msg(REMOTE_IO_CPU_MSG_NOTIFY, remote_io_dev->config.frontend_cpu_id, + 0, 0, 0, remote_io_dev->config.frontend_interrupt); + break; + default: + ret = -HC_E_INVAL_ARGS; + break; + } + + return ret; +} + +bool remote_io_mmio_emul_handler(struct emul_access* acc) +{ + struct vm* vm = cpu()->vcpu->vm; + struct remote_io_dev dev = { 0 }; + size_t i = 0; + + /** Find the Remote I/O device based on the MMIO access address */ + for (i = 0; i < vm->remote_io_dev_num; i++) { + dev = vm->remote_io_devs[i]; + if (acc->addr >= dev.va && acc->addr <= dev.va + dev.size) { + break; + } + } + + if (i == vm->remote_io_dev_num) { + return false; + } + + list_foreach (remote_io_device_list, struct remote_io_device, io_device) { + if (io_device->id == dev.id) { + struct remote_io_request_event* node = 
objpool_alloc(&remote_io_request_event_pool); + if (node == NULL) { + ERROR("Failed allocating Remote I/O request event node"); + } + /** Fill the I/O request information */ + struct remote_io_request request; + request.addr = acc->addr; + request.reg = acc->reg; + request.access_width = acc->width; + request.state = REMOTE_IO_STATE_PENDING; + + if (acc->write) { + long unsigned int value = vcpu_readreg(cpu()->vcpu, acc->reg); + request.op = REMOTE_IO_HYP_WRITE; + request.value = value; + } else { + request.op = REMOTE_IO_HYP_READ; + request.value = 0; + } + + /** Fill the I/O request event information */ + node->cpu_id = cpu()->id; + node->vcpu_id = cpu()->vcpu->id; + + /** Update the I/O request information on the Remote I/O requests array */ + spin_lock(&remote_io_request_lock); + remote_io_requests[node->cpu_id][node->vcpu_id] = request; + spin_unlock(&remote_io_request_lock); + + /** Add the I/O request event to the list */ + list_push(&io_device->request_event_list, (node_t*)node); + + /** Send a CPU message to the backend VM to then inject an interrupt */ + remote_io_cpu_send_msg(REMOTE_IO_CPU_MSG_NOTIFY, io_device->config.backend_cpu_id, 0, 0, + 0, io_device->config.backend_interrupt); + + /** Pause the current vCPU to wait for the MMIO emulation to be completed */ + cpu()->vcpu->active = false; + + return true; + } + } + return false; +} + +static void remote_io_cpu_handler(uint32_t event, uint64_t data) +{ + union remote_io_cpu_msg_data ipc_data = { .raw = data }; + switch (event) { + case REMOTE_IO_CPU_MSG_WRITE: + case REMOTE_IO_CPU_MSG_READ: + remote_io_cpu_post_work(event, ipc_data.id, ipc_data.cpu_id, ipc_data.vcpu_id); + break; + case REMOTE_IO_CPU_MSG_NOTIFY: + vcpu_inject_hw_irq(cpu()->vcpu, ipc_data.interrupt); + break; + default: + WARNING("Unknown Remote I/O CPU message event"); + break; + } +} diff --git a/src/core/vm.c b/src/core/vm.c index e9441413..7baecd87 100644 --- a/src/core/vm.c +++ b/src/core/vm.c @@ -9,6 +9,9 @@ #include #include #include +#include + +OBJPOOL_ALLOC(emul_cache, struct emul_mem, sizeof(struct emul_mem)); static void vm_master_init(struct vm* vm, const struct vm_config* vm_config, vmid_t vm_id) { @@ -225,6 +228,53 @@ static void vm_init_dev(struct vm* vm, const struct vm_config* vm_config) } } +static void vm_init_remote_io(struct vm* vm, const struct vm_config* vm_config) +{ + if (vm_config->platform.remote_io_dev_num > 0) { + vm->remote_io_dev_num = vm_config->platform.remote_io_dev_num; + vm->remote_io_devs = vm_config->platform.remote_io_devs; + + for (size_t i = 0; i < vm_config->platform.remote_io_dev_num; i++) { + struct remote_io_dev* remote_io_dev = &vm_config->platform.remote_io_devs[i]; + struct shmem* shmem = shmem_get(remote_io_dev->shmem.shmem_id); + if (shmem == NULL) { + WARNING("Invalid shmem id in configuration. Ignored."); + continue; + } + size_t shmem_size = remote_io_dev->shmem.size; + if (shmem_size > shmem->size) { + shmem_size = shmem->size; + WARNING("Trying to map region to smaller shared memory. 
Truncated"); + } + spin_lock(&shmem->lock); + shmem->cpu_masters |= (1UL << cpu()->id); + spin_unlock(&shmem->lock); + + struct vm_mem_region reg = { + .base = remote_io_dev->shmem.base, + .size = shmem_size, + .place_phys = true, + .phys = shmem->phys, + .colors = shmem->colors, + }; + + vm_map_mem_region(vm, ®); + + if (!remote_io_dev->is_backend) { + struct emul_mem* emu = objpool_alloc(&emul_cache); + if (emu == NULL) { + ERROR("Failed allocating emulation memory node"); + } + emu->va_base = remote_io_dev->va; + emu->size = remote_io_dev->size; + emu->handler = remote_io_mmio_emul_handler; + vm_emul_add_mem(vm, emu); + } + } + remote_io_assign_cpus(vm); + } +} + static struct vm* vm_allocation_init(struct vm_allocation* vm_alloc) { struct vm* vm = vm_alloc->vm; @@ -271,6 +321,7 @@ struct vm* vm_init(struct vm_allocation* vm_alloc, const struct vm_config* vm_co vm_init_mem_regions(vm, vm_config); vm_init_dev(vm, vm_config); vm_init_ipc(vm, vm_config); + vm_init_remote_io(vm, vm_config); } cpu_sync_and_clear_msgs(&vm->sync); diff --git a/src/core/vmm.c b/src/core/vmm.c index 41257fbc..2634c208 100644 --- a/src/core/vmm.c +++ b/src/core/vmm.c @@ -128,6 +128,7 @@ void vmm_init() vmm_arch_init(); vmm_io_init(); shmem_init(); + remote_io_init(); cpu_sync_barrier(&cpu_glb_sync);