/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include <accel-config/libaccel_config.h>

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/likely.h"

#include "spdk/log.h"
#include "spdk_internal/idxd.h"

#include "idxd_internal.h"

struct spdk_kernel_idxd_device {
	struct spdk_idxd_device idxd;
	struct accfg_ctx *ctx;

	unsigned int max_batch_size;
	unsigned int max_xfer_size;
	unsigned int max_xfer_bits;

	/* We only use a single WQ */
	struct accfg_wq *wq;
	int fd;
	void *portal;
};

#define __kernel_idxd(idxd) SPDK_CONTAINEROF(idxd, struct spdk_kernel_idxd_device, idxd)

static void
kernel_idxd_device_destruct(struct spdk_idxd_device *idxd)
{
	struct spdk_kernel_idxd_device *kernel_idxd = __kernel_idxd(idxd);

	if (kernel_idxd->portal != NULL) {
		munmap(kernel_idxd->portal, 0x1000);
	}

	if (kernel_idxd->fd >= 0) {
		close(kernel_idxd->fd);
	}

	accfg_unref(kernel_idxd->ctx);
	free(kernel_idxd);
}

static struct spdk_idxd_impl g_kernel_idxd_impl;

static int
kernel_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb, spdk_idxd_probe_cb probe_cb)
{
	int rc;
	struct accfg_ctx *ctx;
	struct accfg_device *device;

	/* Create a configuration context, incrementing the reference count. */
	rc = accfg_new(&ctx);
	if (rc < 0) {
		SPDK_ERRLOG("Unable to allocate accel-config context\n");
		return rc;
	}

	/* Loop over each IDXD device */
	accfg_device_foreach(ctx, device) {
		enum accfg_device_state dstate;
		struct spdk_kernel_idxd_device *kernel_idxd;
		struct accfg_wq *wq;
		bool pasid_enabled;

		/* Make sure that the device is enabled */
		dstate = accfg_device_get_state(device);
		if (dstate != ACCFG_DEVICE_ENABLED) {
			continue;
		}

		pasid_enabled = accfg_device_get_pasid_enabled(device);
		if (!pasid_enabled && spdk_iommu_is_enabled()) {
			/*
			 * If the IOMMU is enabled but shared memory mode is not on,
			 * then we have no way to get the IOVA from userspace to use this
			 * device or any kernel device. Return an error.
			 */
			SPDK_ERRLOG("Found kernel IDXD device, but cannot use it when IOMMU is enabled but SM is disabled\n");
			return -ENOTSUP;
		}

		kernel_idxd = calloc(1, sizeof(struct spdk_kernel_idxd_device));
		if (kernel_idxd == NULL) {
			SPDK_ERRLOG("Failed to allocate memory for kernel_idxd device.\n");
			/* TODO: Goto error cleanup */
			return -ENOMEM;
		}

		kernel_idxd->max_batch_size = accfg_device_get_max_batch_size(device);
		kernel_idxd->max_xfer_size = accfg_device_get_max_transfer_size(device);
		kernel_idxd->idxd.socket_id = accfg_device_get_numa_node(device);
		kernel_idxd->idxd.impl = &g_kernel_idxd_impl;
		kernel_idxd->fd = -1;
		kernel_idxd->idxd.version = accfg_device_get_version(device);
		kernel_idxd->idxd.pasid_enabled = pasid_enabled;

		/* Increment configuration context reference for each device. */
		kernel_idxd->ctx = accfg_ref(ctx);

		accfg_wq_foreach(device, wq) {
			enum accfg_wq_state wstate;
			enum accfg_wq_mode mode;
			enum accfg_wq_type type;
			int major, minor;
			char path[1024];

			wstate = accfg_wq_get_state(wq);
			if (wstate != ACCFG_WQ_ENABLED) {
				continue;
			}

			type = accfg_wq_get_type(wq);
			if (type != ACCFG_WQT_USER) {
				continue;
			}

			/* TODO: For now, only support dedicated WQ */
			mode = accfg_wq_get_mode(wq);
			if (mode != ACCFG_WQ_DEDICATED) {
				continue;
			}

			major = accfg_device_get_cdev_major(device);
			if (major < 0) {
				continue;
			}

			minor = accfg_wq_get_cdev_minor(wq);
			if (minor < 0) {
				continue;
			}

			/* Map the portal */
			snprintf(path, sizeof(path), "/dev/char/%u:%u", major, minor);
			kernel_idxd->fd = open(path, O_RDWR);
			if (kernel_idxd->fd < 0) {
				SPDK_ERRLOG("Cannot open the WQ file descriptor on path=%s\n",
					    path);
				continue;
			}

			kernel_idxd->portal = mmap(NULL, 0x1000, PROT_WRITE,
						   MAP_SHARED | MAP_POPULATE, kernel_idxd->fd, 0);
			if (kernel_idxd->portal == MAP_FAILED) {
				if (errno == EPERM) {
					SPDK_ERRLOG("CAP_SYS_RAWIO capabilities required to mmap the portal\n");
				}
				perror("mmap");
				/* Reset state so the destructor does not munmap MAP_FAILED
				 * or leak this file descriptor. */
				kernel_idxd->portal = NULL;
				close(kernel_idxd->fd);
				kernel_idxd->fd = -1;
				continue;
			}

			kernel_idxd->wq = wq;

			/* Since we only use a single WQ, the total size is the size of this WQ */
			kernel_idxd->idxd.total_wq_size = accfg_wq_get_size(wq);
			kernel_idxd->idxd.chan_per_device = (kernel_idxd->idxd.total_wq_size >= 128) ? 8 : 4;

			kernel_idxd->idxd.batch_size = accfg_wq_get_max_batch_size(wq);

			/* We only use a single WQ, so once we've found one we can stop looking. */
			break;
		}

		if (kernel_idxd->idxd.total_wq_size > 0) {
			/* This device has at least 1 WQ available, so ask the user if they want to use it. */
			attach_cb(cb_ctx, &kernel_idxd->idxd);
		} else {
			kernel_idxd_device_destruct(&kernel_idxd->idxd);
		}
	}

	/* Release the reference used for configuration. */
	accfg_unref(ctx);

	return 0;
}

static void
kernel_idxd_dump_sw_error(struct spdk_idxd_device *idxd, void *portal)
{
	/* Need to be enhanced later */
}

static char *
kernel_idxd_portal_get_addr(struct spdk_idxd_device *idxd)
{
	struct spdk_kernel_idxd_device *kernel_idxd = __kernel_idxd(idxd);

	return kernel_idxd->portal;
}

static struct spdk_idxd_impl g_kernel_idxd_impl = {
	.name			= "kernel",
	.probe			= kernel_idxd_probe,
	.destruct		= kernel_idxd_device_destruct,
	.dump_sw_error		= kernel_idxd_dump_sw_error,
	.portal_get_addr	= kernel_idxd_portal_get_addr,
};

SPDK_IDXD_IMPL_REGISTER(kernel, &g_kernel_idxd_impl);