/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Broadcom.
 * All rights reserved.
 */

#include <dirent.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "bcmfs_device.h"
#include "bcmfs_logs.h"
#include "bcmfs_qp.h"
#include "bcmfs_vfio.h"
#include "bcmfs_sym_pmd.h"

struct bcmfs_device_attr {
	const char name[BCMFS_MAX_PATH_LEN];
	const char suffix[BCMFS_DEV_NAME_LEN];
	const enum bcmfs_device_type type;
	const uint32_t offset;
	const uint32_t version;
};

/* BCMFS supported devices */
static struct bcmfs_device_attr dev_table[] = {
	{
		.name = "fs4",
		.suffix = "crypto_mbox",
		.type = BCMFS_SYM_FS4,
		.offset = 0,
		.version = BCMFS_SYM_FS4_VERSION
	},
	{
		.name = "fs5",
		.suffix = "mbox",
		.type = BCMFS_SYM_FS5,
		.offset = 0,
		.version = BCMFS_SYM_FS5_VERSION
	},
	{
		/* sentinel */
	}
};

struct bcmfs_hw_queue_pair_ops_table bcmfs_hw_queue_pair_ops_table = {
	.tl = RTE_SPINLOCK_INITIALIZER,
	.num_ops = 0
};

int
bcmfs_hw_queue_pair_register_ops(const struct bcmfs_hw_queue_pair_ops *h)
{
	struct bcmfs_hw_queue_pair_ops *ops;
	int16_t ops_index;

	rte_spinlock_lock(&bcmfs_hw_queue_pair_ops_table.tl);

	if (h->enq_one_req == NULL || h->dequeue == NULL ||
	    h->ring_db == NULL || h->startq == NULL || h->stopq == NULL) {
		rte_spinlock_unlock(&bcmfs_hw_queue_pair_ops_table.tl);
		BCMFS_LOG(ERR,
			  "Missing callback while registering device ops");
		return -EINVAL;
	}

	if (strlen(h->name) >= sizeof(ops->name) - 1) {
		rte_spinlock_unlock(&bcmfs_hw_queue_pair_ops_table.tl);
		BCMFS_LOG(ERR, "%s(): fs device_ops <%s>: name too long",
			  __func__, h->name);
		return -EEXIST;
	}

	ops_index = bcmfs_hw_queue_pair_ops_table.num_ops++;
	ops = &bcmfs_hw_queue_pair_ops_table.qp_ops[ops_index];
	strlcpy(ops->name, h->name, sizeof(ops->name));
	ops->enq_one_req = h->enq_one_req;
	ops->dequeue = h->dequeue;
	ops->ring_db = h->ring_db;
	ops->startq = h->startq;
	ops->stopq = h->stopq;

	rte_spinlock_unlock(&bcmfs_hw_queue_pair_ops_table.tl);

	return ops_index;
}

TAILQ_HEAD(fsdev_list, bcmfs_device);
static struct fsdev_list fsdev_list = TAILQ_HEAD_INITIALIZER(fsdev_list);

static struct bcmfs_device *
fsdev_allocate_one_dev(struct rte_vdev_device *vdev,
		       char *dirpath,
		       char *devname,
		       enum bcmfs_device_type dev_type __rte_unused)
{
	struct bcmfs_device *fsdev;
	uint32_t i;

	fsdev = rte_calloc(__func__, 1, sizeof(*fsdev), 0);
	if (!fsdev)
		return NULL;

	if (strlen(dirpath) >= sizeof(fsdev->dirname)) {
		BCMFS_LOG(ERR, "dir path name is too long");
		goto cleanup;
	}

	if (strlen(devname) >= sizeof(fsdev->name)) {
		BCMFS_LOG(ERR, "devname is too long");
		goto cleanup;
	}

	/* check if registered ops name is present in directory path */
	for (i = 0; i < bcmfs_hw_queue_pair_ops_table.num_ops; i++)
		if (strstr(dirpath,
			   bcmfs_hw_queue_pair_ops_table.qp_ops[i].name))
			fsdev->sym_hw_qp_ops =
				&bcmfs_hw_queue_pair_ops_table.qp_ops[i];
	if (!fsdev->sym_hw_qp_ops)
		goto cleanup;

	strcpy(fsdev->dirname, dirpath);
	strcpy(fsdev->name, devname);

	fsdev->vdev = vdev;

	/* attach to VFIO */
	if (bcmfs_attach_vfio(fsdev))
		goto cleanup;

	/* Maximum number of QPs supported */
	fsdev->max_hw_qps = fsdev->mmap_size / BCMFS_HW_QUEUE_IO_ADDR_LEN;

	TAILQ_INSERT_TAIL(&fsdev_list, fsdev, next);

	return fsdev;

cleanup:
	rte_free(fsdev);

	return NULL;
}

static struct bcmfs_device *
find_fsdev(struct rte_vdev_device *vdev)
{
	struct bcmfs_device *fsdev;

	TAILQ_FOREACH(fsdev, &fsdev_list, next)
		if (fsdev->vdev == vdev)
			return fsdev;

	return NULL;
}

static void
fsdev_release(struct bcmfs_device *fsdev)
{
	if (fsdev == NULL)
		return;

	TAILQ_REMOVE(&fsdev_list, fsdev, next);
	rte_free(fsdev);
}

/* qsort helper: ascending order of device I/O addresses */
static int
comparator(const void *a, const void *b)
{
	return (*(const unsigned int *)a - *(const unsigned int *)b);
}

/*
 * Collect every entry under @path whose name contains @search, parse the
 * leading hex I/O address of each entry into @devs and return the count.
 */
static int
fsdev_find_all_devs(const char *path, const char *search,
		    uint32_t *devs)
{
	DIR *dir;
	struct dirent *entry;
	int count = 0;
	char addr[BCMFS_MAX_NODES][BCMFS_MAX_PATH_LEN];
	int i;

	dir = opendir(path);
	if (dir == NULL) {
		BCMFS_LOG(ERR, "Unable to open directory");
		return 0;
	}

	while ((entry = readdir(dir)) != NULL) {
		if (strstr(entry->d_name, search)) {
			strlcpy(addr[count], entry->d_name,
				BCMFS_MAX_PATH_LEN);
			count++;
		}
	}

	closedir(dir);

	for (i = 0; i < count; i++)
		devs[i] = (uint32_t)strtoul(addr[i], NULL, 16);
	/* sort the devices based on IO addresses */
	qsort(devs, count, sizeof(uint32_t), comparator);

	return count;
}

/* Look for an entry named exactly @search under @path and copy it to @output */
static bool
fsdev_find_sub_dir(char *path, const char *search, char *output)
{
	DIR *dir;
	struct dirent *entry;

	dir = opendir(path);
	if (dir == NULL) {
		BCMFS_LOG(ERR, "Unable to open directory");
		return false;
	}

	while ((entry = readdir(dir)) != NULL) {
		if (!strcmp(entry->d_name, search)) {
			strlcpy(output, entry->d_name, BCMFS_MAX_PATH_LEN);
			closedir(dir);
			return true;
		}
	}

	closedir(dir);

	return false;
}

static int
bcmfs_vdev_probe(struct rte_vdev_device *vdev)
{
	struct bcmfs_device *fsdev;
	char top_dirpath[BCMFS_MAX_PATH_LEN];
	char sub_dirpath[BCMFS_MAX_PATH_LEN];
	char out_dirpath[BCMFS_MAX_PATH_LEN];
	char out_dirname[BCMFS_MAX_PATH_LEN];
	uint32_t fsdev_dev[BCMFS_MAX_NODES];
	enum bcmfs_device_type dtype;
	int err;
	int i = 0;
	int dev_idx;
	int count = 0;
	bool found = false;

	sprintf(top_dirpath, "%s", SYSFS_BCM_PLTFORM_DEVICES);
	while (strlen(dev_table[i].name)) {
		found = fsdev_find_sub_dir(top_dirpath,
					   dev_table[i].name,
					   sub_dirpath);
		if (found)
			break;
		i++;
	}
	if (!found) {
		BCMFS_LOG(ERR, "No supported bcmfs dev found");
		return -ENODEV;
	}

	dev_idx = i;
	dtype = dev_table[i].type;

	snprintf(out_dirpath, sizeof(out_dirpath), "%s/%s",
		 top_dirpath, sub_dirpath);
	count = fsdev_find_all_devs(out_dirpath,
				    dev_table[dev_idx].suffix,
				    fsdev_dev);
	if (!count) {
		BCMFS_LOG(ERR, "No supported bcmfs dev found");
		return -ENODEV;
	}

	i = 0;
	while (count) {
		/* format the device name present in the path */
		snprintf(out_dirname, sizeof(out_dirname), "%x.%s",
			 fsdev_dev[i], dev_table[dev_idx].suffix);
		fsdev = fsdev_allocate_one_dev(vdev, out_dirpath,
					       out_dirname, dtype);
		if (!fsdev) {
			count--;
			i++;
			continue;
		}
		break;
	}
	if (fsdev == NULL) {
		BCMFS_LOG(ERR, "All supported devs busy");
		return -ENODEV;
	}

	err = bcmfs_sym_dev_create(fsdev);
	if (err) {
		BCMFS_LOG(WARNING,
			  "Failed to create BCMFS SYM PMD for device %s",
			  fsdev->name);
		goto pmd_create_fail;
	}

	return 0;

pmd_create_fail:
	fsdev_release(fsdev);

	return err;
}

static int
bcmfs_vdev_remove(struct rte_vdev_device *vdev)
{
	struct bcmfs_device *fsdev;

	fsdev = find_fsdev(vdev);
	if (fsdev == NULL)
		return -ENODEV;

	fsdev_release(fsdev);
	return 0;
}

/* Register with vdev */
static struct rte_vdev_driver rte_bcmfs_pmd = {
	.probe = bcmfs_vdev_probe,
	.remove = bcmfs_vdev_remove
};

RTE_PMD_REGISTER_VDEV(bcmfs_pmd,
		      rte_bcmfs_pmd);
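
/*
 * Illustration only: a minimal, self-contained sketch of the discovery
 * scheme used by fsdev_find_all_devs() and bcmfs_vdev_probe() above.
 * Platform devices appear in sysfs as "<hex-address>.<suffix>" entries;
 * the driver keeps every entry containing the suffix, parses the leading
 * hex address, sorts the addresses and rebuilds the node name as "%x.%s".
 * The guard macro, the sysfs path and the suffix below are assumptions
 * for illustration and are never defined or used by the real build; the
 * body can be compiled as a standalone program to experiment with the
 * parsing.
 */
#ifdef BCMFS_DISCOVERY_EXAMPLE
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXAMPLE_MAX_NODES 16

/* ascending order of device I/O addresses, as in comparator() above */
static int
example_addr_cmp(const void *a, const void *b)
{
	return (*(const unsigned int *)a - *(const unsigned int *)b);
}

int
main(void)
{
	/* hypothetical path and suffix; the driver derives the real ones
	 * from SYSFS_BCM_PLTFORM_DEVICES and dev_table[]
	 */
	const char *path = "/sys/bus/platform/devices";
	const char *suffix = "crypto_mbox";
	uint32_t addrs[EXAMPLE_MAX_NODES];
	struct dirent *entry;
	int count = 0;
	DIR *dir;
	int i;

	dir = opendir(path);
	if (dir == NULL)
		return 1;

	/* keep every entry whose name contains the suffix */
	while ((entry = readdir(dir)) != NULL && count < EXAMPLE_MAX_NODES)
		if (strstr(entry->d_name, suffix))
			addrs[count++] = (uint32_t)strtoul(entry->d_name,
							   NULL, 16);
	closedir(dir);

	/* probe order follows the sorted I/O addresses */
	qsort(addrs, count, sizeof(uint32_t), example_addr_cmp);

	for (i = 0; i < count; i++)
		printf("%x.%s\n", addrs[i], suffix);

	return 0;
}
#endif /* BCMFS_DISCOVERY_EXAMPLE */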