/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Intel Corporation.
 * All rights reserved.
 */

#include "accel_ioat.h"

#include "spdk/stdinc.h"

#include "spdk_internal/accel_module.h"
#include "spdk/log.h"
#include "spdk/likely.h"

#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/thread.h"
#include "spdk/ioat.h"

static bool g_ioat_enable = false;
static bool g_ioat_initialized = false;

struct ioat_device {
	struct spdk_ioat_chan *ioat;
	bool is_allocated;
	/** linked list pointer for device list */
	TAILQ_ENTRY(ioat_device) tailq;
};

struct pci_device {
	struct spdk_pci_device *pci_dev;
	TAILQ_ENTRY(pci_device) tailq;
};

static TAILQ_HEAD(, ioat_device) g_devices = TAILQ_HEAD_INITIALIZER(g_devices);
static pthread_mutex_t g_ioat_mutex = PTHREAD_MUTEX_INITIALIZER;

static TAILQ_HEAD(, pci_device) g_pci_devices = TAILQ_HEAD_INITIALIZER(g_pci_devices);

struct ioat_io_channel {
	struct spdk_ioat_chan *ioat_ch;
	struct ioat_device *ioat_dev;
	struct spdk_poller *poller;
};

/* Reserve an unused I/OAT channel for a new I/O channel; returns NULL if all are taken. */
static struct ioat_device *
ioat_allocate_device(void)
{
	struct ioat_device *dev;

	pthread_mutex_lock(&g_ioat_mutex);
	TAILQ_FOREACH(dev, &g_devices, tailq) {
		if (!dev->is_allocated) {
			dev->is_allocated = true;
			pthread_mutex_unlock(&g_ioat_mutex);
			return dev;
		}
	}
	pthread_mutex_unlock(&g_ioat_mutex);

	return NULL;
}

static void
ioat_free_device(struct ioat_device *dev)
{
	pthread_mutex_lock(&g_ioat_mutex);
	dev->is_allocated = false;
	pthread_mutex_unlock(&g_ioat_mutex);
}

static int accel_ioat_init(void);
static void accel_ioat_exit(void *ctx);
static bool ioat_supports_opcode(enum accel_opcode opc);
static struct spdk_io_channel *ioat_get_io_channel(void);
static int ioat_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task);

static size_t
accel_ioat_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}

static struct spdk_accel_module_if g_ioat_module = {
	.module_init = accel_ioat_init,
	.module_fini = accel_ioat_exit,
	.write_config_json = NULL,
	.get_ctx_size = accel_ioat_get_ctx_size,
	.name = "ioat",
	.supports_opcode = ioat_supports_opcode,
	.get_io_channel = ioat_get_io_channel,
	.submit_tasks = ioat_submit_tasks
};

SPDK_ACCEL_MODULE_REGISTER(ioat, &g_ioat_module)
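/*
 * The accel framework drives this module entirely through the g_ioat_module
 * vtable above. A rough sketch of the call sequence (local names are
 * illustrative only; the framework owns the actual task objects):
 *
 *	struct spdk_io_channel *ch = ioat_get_io_channel();
 *	struct spdk_accel_task *task = ...;   // built by the accel framework
 *	ioat_submit_tasks(ch, task);          // builds descriptors and flushes
 *	// ioat_poll() later reaps completions and calls
 *	// spdk_accel_task_complete() for each finished task
 */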
static void
ioat_done(void *cb_arg)
{
	struct spdk_accel_task *accel_task = cb_arg;

	spdk_accel_task_complete(accel_task, 0);
}

static int
ioat_poll(void *arg)
{
	struct spdk_ioat_chan *chan = arg;

	return spdk_ioat_process_events(chan) != 0 ? SPDK_POLLER_BUSY :
	       SPDK_POLLER_IDLE;
}

static bool
ioat_supports_opcode(enum accel_opcode opc)
{
	if (!g_ioat_initialized) {
		return false;
	}

	switch (opc) {
	case ACCEL_OPC_COPY:
	case ACCEL_OPC_FILL:
		return true;
	default:
		return false;
	}
}

static int
ioat_submit_fill(struct ioat_io_channel *ioat_ch, struct spdk_accel_task *task)
{
	if (spdk_unlikely(task->d.iovcnt != 1)) {
		return -EINVAL;
	}

	return spdk_ioat_build_fill(ioat_ch->ioat_ch, task, ioat_done,
				    task->d.iovs[0].iov_base, task->fill_pattern,
				    task->d.iovs[0].iov_len);
}

static int
ioat_submit_copy(struct ioat_io_channel *ioat_ch, struct spdk_accel_task *task)
{
	if (spdk_unlikely(task->d.iovcnt != 1 || task->s.iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(task->d.iovs[0].iov_len != task->s.iovs[0].iov_len)) {
		return -EINVAL;
	}

	return spdk_ioat_build_copy(ioat_ch->ioat_ch, task, ioat_done,
				    task->d.iovs[0].iov_base, task->s.iovs[0].iov_base,
				    task->d.iovs[0].iov_len);
}

static int
ioat_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
{
	struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *tmp;
	int rc = 0;

	do {
		switch (accel_task->op_code) {
		case ACCEL_OPC_FILL:
			rc = ioat_submit_fill(ioat_ch, accel_task);
			break;
		case ACCEL_OPC_COPY:
			rc = ioat_submit_copy(ioat_ch, accel_task);
			break;
		default:
			assert(false);
			break;
		}

		tmp = TAILQ_NEXT(accel_task, link);

		/* Report any build errors via the callback now. */
		if (rc) {
			spdk_accel_task_complete(accel_task, rc);
		}

		accel_task = tmp;
	} while (accel_task);

	spdk_ioat_flush(ioat_ch->ioat_ch);

	return 0;
}
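/*
 * ioat_submit_tasks() relies on the ioat library's two-phase submission:
 * spdk_ioat_build_copy()/spdk_ioat_build_fill() only append descriptors to
 * the channel's ring, and a single spdk_ioat_flush() submits the whole chain
 * to hardware. A minimal standalone sketch of the same batching pattern
 * ("chan", "ctx", "done_cb", "dst", "src", "len" and "n" are assumed here
 * purely for illustration):
 *
 *	int i, rc;
 *
 *	for (i = 0; i < n; i++) {
 *		rc = spdk_ioat_build_copy(chan, ctx[i], done_cb,
 *					  dst[i], src[i], len[i]);
 *		if (rc) {
 *			// descriptor could not be built; fail this element
 *		}
 *	}
 *	spdk_ioat_flush(chan);	// one doorbell write for the batch
 */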
static int
ioat_create_cb(void *io_device, void *ctx_buf)
{
	struct ioat_io_channel *ch = ctx_buf;
	struct ioat_device *ioat_dev;

	ioat_dev = ioat_allocate_device();
	if (ioat_dev == NULL) {
		return -1;
	}

	ch->ioat_dev = ioat_dev;
	ch->ioat_ch = ioat_dev->ioat;
	ch->poller = SPDK_POLLER_REGISTER(ioat_poll, ch->ioat_ch, 0);

	return 0;
}

static void
ioat_destroy_cb(void *io_device, void *ctx_buf)
{
	struct ioat_io_channel *ch = ctx_buf;

	ioat_free_device(ch->ioat_dev);
	spdk_poller_unregister(&ch->poller);
}

static struct spdk_io_channel *
ioat_get_io_channel(void)
{
	return spdk_get_io_channel(&g_ioat_module);
}

static bool
probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(pci_dev);
	struct pci_device *pdev;

	SPDK_INFOLOG(accel_ioat,
		     " Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
		     pci_addr.domain,
		     pci_addr.bus,
		     pci_addr.dev,
		     pci_addr.func,
		     spdk_pci_device_get_vendor_id(pci_dev),
		     spdk_pci_device_get_device_id(pci_dev));

	pdev = calloc(1, sizeof(*pdev));
	if (pdev == NULL) {
		return false;
	}
	pdev->pci_dev = pci_dev;
	TAILQ_INSERT_TAIL(&g_pci_devices, pdev, tailq);

	/* Claim the device in case it conflicts with another process. */
	if (spdk_pci_device_claim(pci_dev) < 0) {
		return false;
	}

	return true;
}

static void
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_ioat_chan *ioat)
{
	struct ioat_device *dev;

	dev = calloc(1, sizeof(*dev));
	if (dev == NULL) {
		SPDK_ERRLOG("Failed to allocate device struct\n");
		return;
	}

	dev->ioat = ioat;
	TAILQ_INSERT_TAIL(&g_devices, dev, tailq);
}

void
accel_ioat_enable_probe(void)
{
	g_ioat_enable = true;
}

static int
accel_ioat_init(void)
{
	if (!g_ioat_enable) {
		return 0;
	}

	if (spdk_ioat_probe(NULL, probe_cb, attach_cb) != 0) {
		SPDK_ERRLOG("spdk_ioat_probe() failed\n");
		return -1;
	}

	if (TAILQ_EMPTY(&g_devices)) {
		SPDK_NOTICELOG("No available ioat devices\n");
		return -1;
	}

	g_ioat_initialized = true;
	SPDK_NOTICELOG("Accel framework IOAT module initialized.\n");
	spdk_io_device_register(&g_ioat_module, ioat_create_cb, ioat_destroy_cb,
				sizeof(struct ioat_io_channel), "ioat_accel_module");
	return 0;
}
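/*
 * Initialization flow sketch: accel_ioat_init() is a no-op until something
 * opts the module in by calling accel_ioat_enable_probe() (exported via
 * accel_ioat.h, typically from configuration/RPC code) before the accel
 * framework initializes its modules:
 *
 *	accel_ioat_enable_probe();	// opt in before subsystem init
 *	...
 *	// later, the framework invokes module_init:
 *	//   spdk_ioat_probe() -> probe_cb() claims each I/OAT PCI device,
 *	//                        attach_cb() records its DMA channel
 *	// each thread's I/O channel then reserves a free ioat_device
 *	// via ioat_allocate_device() in ioat_create_cb()
 */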
/* Runs only after every I/O channel has been released, so the device lists
 * can be torn down safely. The io_device argument is &g_ioat_module, not an
 * ioat_device, and is intentionally unused. */
static void
_device_unregister_cb(void *io_device)
{
	struct ioat_device *dev;
	struct pci_device *pci_dev;

	while (!TAILQ_EMPTY(&g_devices)) {
		dev = TAILQ_FIRST(&g_devices);
		TAILQ_REMOVE(&g_devices, dev, tailq);
		spdk_ioat_detach(dev->ioat);
		free(dev);
	}

	while (!TAILQ_EMPTY(&g_pci_devices)) {
		pci_dev = TAILQ_FIRST(&g_pci_devices);
		TAILQ_REMOVE(&g_pci_devices, pci_dev, tailq);
		spdk_pci_device_detach(pci_dev->pci_dev);
		free(pci_dev);
	}

	g_ioat_initialized = false;

	spdk_accel_module_finish();
}

static void
accel_ioat_exit(void *ctx)
{
	if (g_ioat_initialized) {
		spdk_io_device_unregister(&g_ioat_module, _device_unregister_cb);
	} else {
		spdk_accel_module_finish();
	}
}

SPDK_LOG_REGISTER_COMPONENT(accel_ioat)
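/*
 * Teardown sketch: module_fini (accel_ioat_exit) defers the real cleanup to
 * _device_unregister_cb(), which the thread library invokes only once every
 * ioat_io_channel is destroyed, i.e. all pollers are unregistered and no
 * descriptors remain in flight:
 *
 *	accel_ioat_exit(NULL);
 *	// -> spdk_io_device_unregister(&g_ioat_module, _device_unregister_cb)
 *	// -> spdk_ioat_detach() / spdk_pci_device_detach() per device
 *	// -> spdk_accel_module_finish() tells the framework we are done
 */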