1ef4b04f8SRavi Kumar /* SPDX-License-Identifier: BSD-3-Clause 2ef4b04f8SRavi Kumar * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. 3ef4b04f8SRavi Kumar */ 4ef4b04f8SRavi Kumar 5ef4b04f8SRavi Kumar #include <dirent.h> 6ef4b04f8SRavi Kumar #include <fcntl.h> 7ef4b04f8SRavi Kumar #include <stdio.h> 8ef4b04f8SRavi Kumar #include <string.h> 9ef4b04f8SRavi Kumar #include <sys/mman.h> 10ef4b04f8SRavi Kumar #include <sys/queue.h> 11ef4b04f8SRavi Kumar #include <sys/types.h> 12ef4b04f8SRavi Kumar #include <sys/file.h> 13ef4b04f8SRavi Kumar #include <unistd.h> 14ef4b04f8SRavi Kumar 15ef4b04f8SRavi Kumar #include <rte_hexdump.h> 16ef4b04f8SRavi Kumar #include <rte_memzone.h> 17ef4b04f8SRavi Kumar #include <rte_malloc.h> 18ef4b04f8SRavi Kumar #include <rte_memory.h> 19ef4b04f8SRavi Kumar #include <rte_spinlock.h> 20ef4b04f8SRavi Kumar #include <rte_string_fns.h> 21ef4b04f8SRavi Kumar 22ef4b04f8SRavi Kumar #include "ccp_dev.h" 23ef4b04f8SRavi Kumar #include "ccp_pci.h" 24ef4b04f8SRavi Kumar #include "ccp_pmd_private.h" 25ef4b04f8SRavi Kumar 26ef4b04f8SRavi Kumar struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list); 27ef4b04f8SRavi Kumar static int ccp_dev_id; 28ef4b04f8SRavi Kumar 293c20cf98SRavi Kumar int 303c20cf98SRavi Kumar ccp_dev_start(struct rte_cryptodev *dev) 313c20cf98SRavi Kumar { 323c20cf98SRavi Kumar struct ccp_private *priv = dev->data->dev_private; 333c20cf98SRavi Kumar 343c20cf98SRavi Kumar priv->last_dev = TAILQ_FIRST(&ccp_list); 353c20cf98SRavi Kumar return 0; 363c20cf98SRavi Kumar } 373c20cf98SRavi Kumar 3870f0f8a8SRavi Kumar struct ccp_queue * 3970f0f8a8SRavi Kumar ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req) 4070f0f8a8SRavi Kumar { 4170f0f8a8SRavi Kumar int i, ret = 0; 4270f0f8a8SRavi Kumar struct ccp_device *dev; 4370f0f8a8SRavi Kumar struct ccp_private *priv = cdev->data->dev_private; 4470f0f8a8SRavi Kumar 4570f0f8a8SRavi Kumar dev = TAILQ_NEXT(priv->last_dev, next); 4670f0f8a8SRavi Kumar if (unlikely(dev == 
NULL)) 4770f0f8a8SRavi Kumar dev = TAILQ_FIRST(&ccp_list); 4870f0f8a8SRavi Kumar priv->last_dev = dev; 4970f0f8a8SRavi Kumar if (dev->qidx >= dev->cmd_q_count) 5070f0f8a8SRavi Kumar dev->qidx = 0; 5170f0f8a8SRavi Kumar ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots); 5270f0f8a8SRavi Kumar if (ret >= slot_req) 5370f0f8a8SRavi Kumar return &dev->cmd_q[dev->qidx]; 5470f0f8a8SRavi Kumar for (i = 0; i < dev->cmd_q_count; i++) { 5570f0f8a8SRavi Kumar dev->qidx++; 5670f0f8a8SRavi Kumar if (dev->qidx >= dev->cmd_q_count) 5770f0f8a8SRavi Kumar dev->qidx = 0; 5870f0f8a8SRavi Kumar ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots); 5970f0f8a8SRavi Kumar if (ret >= slot_req) 6070f0f8a8SRavi Kumar return &dev->cmd_q[dev->qidx]; 6170f0f8a8SRavi Kumar } 6270f0f8a8SRavi Kumar return NULL; 6370f0f8a8SRavi Kumar } 6470f0f8a8SRavi Kumar 65585d4037SRavi Kumar int 66585d4037SRavi Kumar ccp_read_hwrng(uint32_t *value) 67585d4037SRavi Kumar { 68585d4037SRavi Kumar struct ccp_device *dev; 69585d4037SRavi Kumar 70585d4037SRavi Kumar TAILQ_FOREACH(dev, &ccp_list, next) { 71585d4037SRavi Kumar void *vaddr = (void *)(dev->pci.mem_resource[2].addr); 72585d4037SRavi Kumar 73585d4037SRavi Kumar while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) { 74585d4037SRavi Kumar *value = CCP_READ_REG(vaddr, TRNG_OUT_REG); 75585d4037SRavi Kumar if (*value) { 76585d4037SRavi Kumar dev->hwrng_retries = 0; 77585d4037SRavi Kumar return 0; 78585d4037SRavi Kumar } 79585d4037SRavi Kumar } 80585d4037SRavi Kumar dev->hwrng_retries = 0; 81585d4037SRavi Kumar } 82585d4037SRavi Kumar return -1; 83585d4037SRavi Kumar } 84585d4037SRavi Kumar 85ef4b04f8SRavi Kumar static const struct rte_memzone * 86ef4b04f8SRavi Kumar ccp_queue_dma_zone_reserve(const char *queue_name, 87ef4b04f8SRavi Kumar uint32_t queue_size, 88ef4b04f8SRavi Kumar int socket_id) 89ef4b04f8SRavi Kumar { 90ef4b04f8SRavi Kumar const struct rte_memzone *mz; 91ef4b04f8SRavi Kumar 92ef4b04f8SRavi Kumar mz = rte_memzone_lookup(queue_name); 
93ef4b04f8SRavi Kumar if (mz != 0) { 94ef4b04f8SRavi Kumar if (((size_t)queue_size <= mz->len) && 95ef4b04f8SRavi Kumar ((socket_id == SOCKET_ID_ANY) || 96ef4b04f8SRavi Kumar (socket_id == mz->socket_id))) { 97ef4b04f8SRavi Kumar CCP_LOG_INFO("re-use memzone already " 98ef4b04f8SRavi Kumar "allocated for %s", queue_name); 99ef4b04f8SRavi Kumar return mz; 100ef4b04f8SRavi Kumar } 101ef4b04f8SRavi Kumar CCP_LOG_ERR("Incompatible memzone already " 102ef4b04f8SRavi Kumar "allocated %s, size %u, socket %d. " 103ef4b04f8SRavi Kumar "Requested size %u, socket %u", 104ef4b04f8SRavi Kumar queue_name, (uint32_t)mz->len, 105ef4b04f8SRavi Kumar mz->socket_id, queue_size, socket_id); 106ef4b04f8SRavi Kumar return NULL; 107ef4b04f8SRavi Kumar } 108ef4b04f8SRavi Kumar 109ef4b04f8SRavi Kumar CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u", 110ef4b04f8SRavi Kumar queue_name, queue_size, socket_id); 111ef4b04f8SRavi Kumar 112ef4b04f8SRavi Kumar return rte_memzone_reserve_aligned(queue_name, queue_size, 113ef4b04f8SRavi Kumar socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size); 114ef4b04f8SRavi Kumar } 115ef4b04f8SRavi Kumar 116ef4b04f8SRavi Kumar /* bitmap support apis */ 117ef4b04f8SRavi Kumar static inline void 118ef4b04f8SRavi Kumar ccp_set_bit(unsigned long *bitmap, int n) 119ef4b04f8SRavi Kumar { 120ef4b04f8SRavi Kumar __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n))); 121ef4b04f8SRavi Kumar } 122ef4b04f8SRavi Kumar 123ef4b04f8SRavi Kumar static inline void 124ef4b04f8SRavi Kumar ccp_clear_bit(unsigned long *bitmap, int n) 125ef4b04f8SRavi Kumar { 126ef4b04f8SRavi Kumar __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n))); 127ef4b04f8SRavi Kumar } 128ef4b04f8SRavi Kumar 129ef4b04f8SRavi Kumar static inline uint32_t 130ef4b04f8SRavi Kumar ccp_get_bit(unsigned long *bitmap, int n) 131ef4b04f8SRavi Kumar { 132ef4b04f8SRavi Kumar return ((bitmap[WORD_OFFSET(n)] & (1 << BIT_OFFSET(n))) != 0); 133ef4b04f8SRavi Kumar } 134ef4b04f8SRavi 
Kumar 135ef4b04f8SRavi Kumar 136ef4b04f8SRavi Kumar static inline uint32_t 137ef4b04f8SRavi Kumar ccp_ffz(unsigned long word) 138ef4b04f8SRavi Kumar { 139ef4b04f8SRavi Kumar unsigned long first_zero; 140ef4b04f8SRavi Kumar 141ef4b04f8SRavi Kumar first_zero = __builtin_ffsl(~word); 142ef4b04f8SRavi Kumar return first_zero ? (first_zero - 1) : 143ef4b04f8SRavi Kumar BITS_PER_WORD; 144ef4b04f8SRavi Kumar } 145ef4b04f8SRavi Kumar 146ef4b04f8SRavi Kumar static inline uint32_t 147ef4b04f8SRavi Kumar ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit) 148ef4b04f8SRavi Kumar { 149ef4b04f8SRavi Kumar uint32_t i; 150ef4b04f8SRavi Kumar uint32_t nwords = 0; 151ef4b04f8SRavi Kumar 152ef4b04f8SRavi Kumar nwords = (limit - 1) / BITS_PER_WORD + 1; 153ef4b04f8SRavi Kumar for (i = 0; i < nwords; i++) { 154ef4b04f8SRavi Kumar if (addr[i] == 0UL) 155ef4b04f8SRavi Kumar return i * BITS_PER_WORD; 156ef4b04f8SRavi Kumar if (addr[i] < ~(0UL)) 157ef4b04f8SRavi Kumar break; 158ef4b04f8SRavi Kumar } 159ef4b04f8SRavi Kumar return (i == nwords) ? 
limit : i * BITS_PER_WORD + ccp_ffz(addr[i]); 160ef4b04f8SRavi Kumar } 161ef4b04f8SRavi Kumar 162ef4b04f8SRavi Kumar static void 163ef4b04f8SRavi Kumar ccp_bitmap_set(unsigned long *map, unsigned int start, int len) 164ef4b04f8SRavi Kumar { 165ef4b04f8SRavi Kumar unsigned long *p = map + WORD_OFFSET(start); 166ef4b04f8SRavi Kumar const unsigned int size = start + len; 167ef4b04f8SRavi Kumar int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD); 168ef4b04f8SRavi Kumar unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start); 169ef4b04f8SRavi Kumar 170ef4b04f8SRavi Kumar while (len - bits_to_set >= 0) { 171ef4b04f8SRavi Kumar *p |= mask_to_set; 172ef4b04f8SRavi Kumar len -= bits_to_set; 173ef4b04f8SRavi Kumar bits_to_set = BITS_PER_WORD; 174ef4b04f8SRavi Kumar mask_to_set = ~0UL; 175ef4b04f8SRavi Kumar p++; 176ef4b04f8SRavi Kumar } 177ef4b04f8SRavi Kumar if (len) { 178ef4b04f8SRavi Kumar mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size); 179ef4b04f8SRavi Kumar *p |= mask_to_set; 180ef4b04f8SRavi Kumar } 181ef4b04f8SRavi Kumar } 182ef4b04f8SRavi Kumar 183ef4b04f8SRavi Kumar static void 184ef4b04f8SRavi Kumar ccp_bitmap_clear(unsigned long *map, unsigned int start, int len) 185ef4b04f8SRavi Kumar { 186ef4b04f8SRavi Kumar unsigned long *p = map + WORD_OFFSET(start); 187ef4b04f8SRavi Kumar const unsigned int size = start + len; 188ef4b04f8SRavi Kumar int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD); 189ef4b04f8SRavi Kumar unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start); 190ef4b04f8SRavi Kumar 191ef4b04f8SRavi Kumar while (len - bits_to_clear >= 0) { 192ef4b04f8SRavi Kumar *p &= ~mask_to_clear; 193ef4b04f8SRavi Kumar len -= bits_to_clear; 194ef4b04f8SRavi Kumar bits_to_clear = BITS_PER_WORD; 195ef4b04f8SRavi Kumar mask_to_clear = ~0UL; 196ef4b04f8SRavi Kumar p++; 197ef4b04f8SRavi Kumar } 198ef4b04f8SRavi Kumar if (len) { 199ef4b04f8SRavi Kumar mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size); 200ef4b04f8SRavi Kumar *p &= ~mask_to_clear; 
201ef4b04f8SRavi Kumar } 202ef4b04f8SRavi Kumar } 203ef4b04f8SRavi Kumar 204ef4b04f8SRavi Kumar 205ef4b04f8SRavi Kumar static unsigned long 206ef4b04f8SRavi Kumar _ccp_find_next_bit(const unsigned long *addr, 207ef4b04f8SRavi Kumar unsigned long nbits, 208ef4b04f8SRavi Kumar unsigned long start, 209ef4b04f8SRavi Kumar unsigned long invert) 210ef4b04f8SRavi Kumar { 211ef4b04f8SRavi Kumar unsigned long tmp; 212ef4b04f8SRavi Kumar 213ef4b04f8SRavi Kumar if (!nbits || start >= nbits) 214ef4b04f8SRavi Kumar return nbits; 215ef4b04f8SRavi Kumar 216ef4b04f8SRavi Kumar tmp = addr[start / BITS_PER_WORD] ^ invert; 217ef4b04f8SRavi Kumar 218ef4b04f8SRavi Kumar /* Handle 1st word. */ 219ef4b04f8SRavi Kumar tmp &= CCP_BITMAP_FIRST_WORD_MASK(start); 220ef4b04f8SRavi Kumar start = ccp_round_down(start, BITS_PER_WORD); 221ef4b04f8SRavi Kumar 222ef4b04f8SRavi Kumar while (!tmp) { 223ef4b04f8SRavi Kumar start += BITS_PER_WORD; 224ef4b04f8SRavi Kumar if (start >= nbits) 225ef4b04f8SRavi Kumar return nbits; 226ef4b04f8SRavi Kumar 227ef4b04f8SRavi Kumar tmp = addr[start / BITS_PER_WORD] ^ invert; 228ef4b04f8SRavi Kumar } 229ef4b04f8SRavi Kumar 230ef4b04f8SRavi Kumar return RTE_MIN(start + (ffs(tmp) - 1), nbits); 231ef4b04f8SRavi Kumar } 232ef4b04f8SRavi Kumar 233ef4b04f8SRavi Kumar static unsigned long 234ef4b04f8SRavi Kumar ccp_find_next_bit(const unsigned long *addr, 235ef4b04f8SRavi Kumar unsigned long size, 236ef4b04f8SRavi Kumar unsigned long offset) 237ef4b04f8SRavi Kumar { 238ef4b04f8SRavi Kumar return _ccp_find_next_bit(addr, size, offset, 0UL); 239ef4b04f8SRavi Kumar } 240ef4b04f8SRavi Kumar 241ef4b04f8SRavi Kumar static unsigned long 242ef4b04f8SRavi Kumar ccp_find_next_zero_bit(const unsigned long *addr, 243ef4b04f8SRavi Kumar unsigned long size, 244ef4b04f8SRavi Kumar unsigned long offset) 245ef4b04f8SRavi Kumar { 246ef4b04f8SRavi Kumar return _ccp_find_next_bit(addr, size, offset, ~0UL); 247ef4b04f8SRavi Kumar } 248ef4b04f8SRavi Kumar 249ef4b04f8SRavi Kumar /** 
 * bitmap_find_next_zero_area - find a contiguous aligned zero area
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 *
 * NOTE: on failure the return value is an index PAST the end of the
 * bitmap (index + nr > size); callers must range-check the result.
 */
static unsigned long
ccp_bitmap_find_next_zero_area(unsigned long *map,
			       unsigned long size,
			       unsigned long start,
			       unsigned int nr)
{
	unsigned long index, end, i;

again:
	index = ccp_find_next_zero_bit(map, size, start);

	/* Not enough room left before the end of the bitmap. */
	end = index + nr;
	if (end > size)
		return end;
	/* Candidate rejected if any bit inside [index, end) is set. */
	i = ccp_find_next_bit(map, end, index);
	if (i < end) {
		start = i + 1;
		goto again;
	}
	return index;
}

/*
 * Allocate @count contiguous LSB slots for @cmd_q.  The queue's private
 * LSB segment is tried first; if it is full, or the queue has no private
 * segment (cmd_q->lsb < 0), a range is taken from the device-wide shared
 * map under lsb_lock.
 * Returns the slot offset, or 0 when no slots are available.
 * NOTE(review): 0 doubles as the failure sentinel, and ccp_lsb_free()
 * treats start == 0 as "nothing to free" -- confirm offset 0 is never
 * handed out for a real allocation.
 */
static uint32_t
ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
{
	struct ccp_device *ccp;
	int start;

	/* First look at the map for the queue */
	if (cmd_q->lsb >= 0) {
		start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
								 LSB_SIZE, 0,
								 count);
		if (start < LSB_SIZE) {
			ccp_bitmap_set(cmd_q->lsbmap, start, count);
			/* Private slot, addressed device-wide via the
			 * queue's LSB segment number.
			 */
			return start + cmd_q->lsb * LSB_SIZE;
		}
	}

	/* try to get an entry from the shared blocks */
	ccp = cmd_q->dev;

	rte_spinlock_lock(&ccp->lsb_lock);

	start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
					MAX_LSB_CNT * LSB_SIZE,
					0, count);
	/* On failure the finder returns index + count > size, so a failed
	 * search cannot satisfy this bound.
	 */
	if (start <= MAX_LSB_CNT * LSB_SIZE) {
		ccp_bitmap_set(ccp->lsbmap, start, count);
		rte_spinlock_unlock(&ccp->lsb_lock);
		return start * LSB_ITEM_SIZE;
	}
	CCP_LOG_ERR("NO LSBs available");

	rte_spinlock_unlock(&ccp->lsb_lock);

	return 0;
}

/*
 * Release @count LSB slots previously returned by ccp_lsb_alloc().
 * start == 0 is ignored (nothing was allocated).  Private-segment slots
 * are cleared lock-free in the queue's own map; shared slots are cleared
 * under the device lsb_lock.
 */
static void __rte_unused
ccp_lsb_free(struct ccp_queue *cmd_q,
	     unsigned int start,
	     unsigned int count)
{
	int lsbno = start / LSB_SIZE;

	if (!start)
		return;

	if (cmd_q->lsb == lsbno) {
		/* An entry from the private LSB */
		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
	} else {
		/* From the shared LSBs */
		struct ccp_device *ccp = cmd_q->dev;

		rte_spinlock_lock(&ccp->lsb_lock);
		ccp_bitmap_clear(ccp->lsbmap, start, count);
		rte_spinlock_unlock(&ccp->lsb_lock);
	}
}

/*
 * Decode the LSB-access field of the hardware @status word into
 * cmd_q->lsbmask: one bit per LSB region this queue may use, each region
 * occupying LSB_REGION_WIDTH bits of @status.
 * Returns 0 if the queue can reach at least one region, -EINVAL otherwise.
 */
static int
ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
{
	int q_mask = 1 << cmd_q->id;
	int weight = 0;
	int j;

	/* Build a bit mask to know which LSBs
	 * this queue has access to.
	 * Don't bother with segment 0
	 * as it has special
	 * privileges.
	 */
	cmd_q->lsbmask = 0;
	status >>= LSB_REGION_WIDTH;
	for (j = 1; j < MAX_LSB_CNT; j++) {
		if (status & q_mask)
			ccp_set_bit(&cmd_q->lsbmask, j);

		status >>= LSB_REGION_WIDTH;
	}

	/* Count how many regions ended up accessible. */
	for (j = 0; j < MAX_LSB_CNT; j++)
		if (ccp_get_bit(&cmd_q->lsbmask, j))
			weight++;

	printf("Queue %d can access %d LSB regions of mask %lu\n",
	       (int)cmd_q->id, weight, cmd_q->lsbmask);

	return weight ? 0 : -EINVAL;
}

/*
 * Give a dedicated LSB region to every queue whose count of reachable
 * regions equals @lsb_cnt (the caller raises @lsb_cnt from 1 upward, so
 * the most-constrained queues are served first).  A region granted
 * privately is removed from the public mask @lsb_pub.
 * Returns the updated number of unassigned public regions, or -EINVAL
 * when a matching queue could not be given any region.
 * NOTE(review): ffs() here operates on an int while qlsb is unsigned
 * long -- adequate only while MAX_LSB_CNT <= 32; confirm if that
 * constant ever grows.
 */
static int
ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
			     int lsb_cnt, int n_lsbs,
			     unsigned long *lsb_pub)
{
	unsigned long qlsb = 0;
	int bitno = 0;
	int qlsb_wgt = 0;
	int i, j;

	/* For each queue:
	 * If the count of potential LSBs available to a queue matches the
	 * ordinal given to us in lsb_cnt:
	 * Copy the mask of possible LSBs for this queue into "qlsb";
	 * For each bit in qlsb, see if the corresponding bit in the
	 * aggregation mask is set; if so, we have a match.
	 * If we have a match, clear the bit in the aggregation to
	 * mark it as no longer available.
	 * If there is no match, clear the bit in qlsb and keep looking.
	 */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct ccp_queue *cmd_q = &ccp->cmd_q[i];

		/* Weight = number of LSB regions this queue can reach. */
		qlsb_wgt = 0;
		for (j = 0; j < MAX_LSB_CNT; j++)
			if (ccp_get_bit(&cmd_q->lsbmask, j))
				qlsb_wgt++;

		if (qlsb_wgt == lsb_cnt) {
			qlsb = cmd_q->lsbmask;

			bitno = ffs(qlsb) - 1;
			while (bitno < MAX_LSB_CNT) {
				if (ccp_get_bit(lsb_pub, bitno)) {
					/* We found an available LSB
					 * that this queue can access
					 */
					cmd_q->lsb = bitno;
					ccp_clear_bit(lsb_pub, bitno);
					break;
				}
				ccp_clear_bit(&qlsb, bitno);
				bitno = ffs(qlsb) - 1;
			}
			/* Ran out of candidate regions for this queue. */
			if (bitno >= MAX_LSB_CNT)
				return -EINVAL;
			n_lsbs--;
		}
	}
	return n_lsbs;
}

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared
 * resources.
 */
static int
ccp_assign_lsbs(struct ccp_device *ccp)
{
	unsigned long lsb_pub = 0, qlsb = 0;
	int n_lsbs = 0;
	int bitno;
	int i, lsb_cnt;
	int rc = 0;

	rte_spinlock_init(&ccp->lsb_lock);

	/* Create an aggregate bitmap to get a total count of available LSBs */
	for (i = 0; i < ccp->cmd_q_count; i++)
		lsb_pub |= ccp->cmd_q[i].lsbmask;

	for (i = 0; i < MAX_LSB_CNT; i++)
		if (ccp_get_bit(&lsb_pub, i))
			n_lsbs++;

	if (n_lsbs >= ccp->cmd_q_count) {
		/* We have enough LSBS to give every queue a private LSB.
		 * Brute force search to start with the queues that are more
		 * constrained in LSB choice. When an LSB is privately
		 * assigned, it is removed from the public mask.
		 * This is an ugly N squared algorithm with some optimization.
		 */
		for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
		     lsb_cnt++) {
			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
							  &lsb_pub);
			if (rc < 0)
				return -EINVAL;
			n_lsbs = rc;
		}
	}

	rc = 0;
	/* What's left of the LSBs, according to the public mask, now become
	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
	 * that can't be used as a shared resource, so mark the LSB slots for
	 * them as "in use".
	 */
	qlsb = lsb_pub;
	bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
	while (bitno < MAX_LSB_CNT) {
		/* Region not shareable: mark its whole slot range used and
		 * flip the scratch bit so the scan moves past it.
		 */
		ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
		ccp_set_bit(&qlsb, bitno);
		bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
	}

	return rc;
}

/*
 * Initialise a newly-probed CCP and append it to the global ccp_list:
 * program the engine configuration registers, publish the private LSB
 * masks, discover the available command queues, map their registers and
 * descriptor rings, assign LSB regions, and pre-allocate per-queue LSB
 * slots for keys/IVs/SHA/HMAC.
 * Returns 0 on success, -1 when @dev is NULL.
 */
static int
ccp_add_device(struct ccp_device *dev, int type)
{
	int i;
	uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
	uint64_t status;
	struct ccp_queue *cmd_q;
	const struct rte_memzone *q_mz;
	void *vaddr;

	if (dev == NULL)
		return -1;

	dev->id = ccp_dev_id++;
	dev->qidx = 0;
	/* BAR2 holds the global (non per-queue) register file. */
	vaddr = (void *)(dev->pci.mem_resource[2].addr);

	if (type == CCP_VERSION_5B) {
		/* Hardware init constants for the 5B revision; values are
		 * per the CCP register specification (not derivable here).
		 */
		CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
		CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
		/* Seed the AES mask register from 12 TRNG reads. */
		for (i = 0; i < 12; i++) {
			CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
				      CCP_READ_REG(vaddr, TRNG_OUT_REG));
		}
		CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
		CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
		CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);

		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);

		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
	}
	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);

	/* Copy the private LSB mask to the public registers */
	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
	status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
	/* The low register carries 30 mask bits (0x3FFFFFFF above), hence
	 * the 30-bit shift when recombining into one 64-bit status word.
	 */
	status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo);

	dev->cmd_q_count = 0;
	/* Find available queues */
	qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;	/* queue not present on this part */
		cmd_q = &dev->cmd_q[dev->cmd_q_count++];
		cmd_q->dev = dev;
		cmd_q->id = i;
		cmd_q->qidx = 0;
		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);

		/* Per-queue registers sit at fixed strides after BAR2. */
		cmd_q->reg_base = (uint8_t *)vaddr +
			CMD_Q_STATUS_INCR * (i + 1);

		/* CCP queue memory */
		snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
			 "%s_%d_%s_%d_%s",
			 "ccp_dev",
			 (int)dev->id, "queue",
			 (int)cmd_q->id, "mem");
		q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
						  cmd_q->qsize, SOCKET_ID_ANY);
		/* NOTE(review): q_mz is not checked for NULL -- a memzone
		 * allocation failure here dereferences NULL below.
		 */
		cmd_q->qbase_addr = (void *)q_mz->addr;
		cmd_q->qbase_desc = (void *)q_mz->addr;
		cmd_q->qbase_phys_addr = q_mz->iova;

		cmd_q->qcontrol = 0;
		/* init control reg to zero */
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol);

		/* Disable the interrupts */
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
		CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
		CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);

		/* Clear the interrupts */
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
			      ALL_INTERRUPTS);

		/* Configure size of each virtual queue accessible to host */
		cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;

		/* Point head and tail at the start of the empty ring. */
		dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
			      (uint32_t)dma_addr_lo);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
			      (uint32_t)dma_addr_lo);

		/* High DMA address bits travel in the control register. */
		dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
		cmd_q->qcontrol |= (dma_addr_hi << 16);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol);

		/* create LSB Mask map */
		if (ccp_find_lsb_regions(cmd_q, status))
			CCP_LOG_ERR("queue doesn't have lsb regions");
		cmd_q->lsb = -1;	/* no private LSB until assigned below */

		rte_atomic64_init(&cmd_q->free_slots);
		rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
		/* unused slot barrier b/w H&T */
	}

	if (ccp_assign_lsbs(dev))
		CCP_LOG_ERR("Unable to assign lsb region");

	/* pre-allocate LSB slots */
	for (i = 0; i < dev->cmd_q_count; i++) {
		dev->cmd_q[i].sb_key =
			ccp_lsb_alloc(&dev->cmd_q[i], 1);
		dev->cmd_q[i].sb_iv =
			ccp_lsb_alloc(&dev->cmd_q[i], 1);
		dev->cmd_q[i].sb_sha =
			ccp_lsb_alloc(&dev->cmd_q[i], 2);
		dev->cmd_q[i].sb_hmac =
			ccp_lsb_alloc(&dev->cmd_q[i], 2);
	}

	TAILQ_INSERT_TAIL(&ccp_list, dev, next);
	return 0;
}

/* Unlink @dev from the global device list (no resources are released). */
static void
ccp_remove_device(struct ccp_device *dev)
{
	if (dev == NULL)
		return;

	TAILQ_REMOVE(&ccp_list, dev, next);
}

/*
 * Check whether the sysfs PCI directory @dirname names a device present
 * in the @ccp_id table.  On a match, *type receives the index of the
 * matching table entry and 1 is returned; otherwise 0.
 */
static int
is_ccp_device(const char *dirname,
	      const struct rte_pci_id *ccp_id,
	      int *type)
{
	char filename[PATH_MAX];
	const struct rte_pci_id *id;
	uint16_t vendor, device_id;
	int i;
	unsigned long tmp;

	/* get vendor id */
	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		return 0;
	vendor = (uint16_t)tmp;

	/* get device id */
	snprintf(filename, sizeof(filename), "%s/device", dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		return 0;
	device_id = (uint16_t)tmp;

	/* The id table is terminated by a zero vendor_id entry. */
	for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
		if (vendor == id->vendor_id &&
		    device_id == id->device_id) {
			*type = i;
			return 1; /* Matched device */
		}
	}
	return 0;
}

/*
 * Probe one CCP at sysfs directory @dirname: read its PCI identity and
 * BARs, open and flock() the matching /dev/uioN node, mmap the memory
 * resources and register the device via ccp_add_device().
 * Returns 0 on success, -1 on any failure.
 * On success uio_fd stays open -- presumably so the flock() remains held
 * for the life of the process; confirm before changing.
 */
static int
ccp_probe_device(const char *dirname, uint16_t domain,
		 uint8_t bus, uint8_t devid,
		 uint8_t function, int ccp_type)
{
	struct ccp_device *ccp_dev = NULL;
	struct rte_pci_device *pci;
	char filename[PATH_MAX];
	unsigned long tmp;
	int uio_fd = -1, i, uio_num;
	char uio_devname[PATH_MAX];
	void *map_addr;

	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
			      RTE_CACHE_LINE_SIZE);
	if (ccp_dev == NULL)
		goto fail;
	pci = &(ccp_dev->pci);

	pci->addr.domain = domain;
	pci->addr.bus = bus;
	pci->addr.devid = devid;
	pci->addr.function = function;

	/* get vendor id */
	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		goto fail;
	pci->id.vendor_id = (uint16_t)tmp;

	/* get device id */
	snprintf(filename, sizeof(filename), "%s/device", dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		goto fail;
	pci->id.device_id = (uint16_t)tmp;

	/* get subsystem_vendor id */
	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
		 dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		goto fail;
	pci->id.subsystem_vendor_id = (uint16_t)tmp;

	/* get subsystem_device id */
	snprintf(filename, sizeof(filename), "%s/subsystem_device",
		 dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		goto fail;
	pci->id.subsystem_device_id = (uint16_t)tmp;

	/* get class_id */
	snprintf(filename, sizeof(filename), "%s/class",
		 dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		goto fail;
	/* the least 24 bits are valid: class, subclass, program interface */
	pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;

	/* parse resources */
	snprintf(filename, sizeof(filename), "%s/resource", dirname);
	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
		goto fail;

	uio_num = ccp_find_uio_devname(dirname);
	if (uio_num < 0) {
		/*
		 * It may take time for uio device to appear,
		 * wait here and try again
		 */
		usleep(100000);
		uio_num = ccp_find_uio_devname(dirname);
		if (uio_num < 0)
			goto fail;
	}
	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);

	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
	if (uio_fd < 0)
		goto fail;
	/* Exclusive, non-blocking lock: fail fast if another process owns
	 * this device.
	 */
	if (flock(uio_fd, LOCK_EX | LOCK_NB))
		goto fail;

	/* Map the PCI memory resource of device */
	for (i = 0; i < PCI_MAX_RESOURCE; i++) {

		char devname[PATH_MAX];
		int res_fd;

		if (pci->mem_resource[i].phys_addr == 0)
			continue;	/* BAR not implemented */
		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
		res_fd = open(devname, O_RDWR);
		if (res_fd < 0)
			goto fail;
		map_addr = mmap(NULL, pci->mem_resource[i].len,
				PROT_READ | PROT_WRITE,
				MAP_SHARED, res_fd, 0);
		/* NOTE(review): res_fd is never closed (the mapping would
		 * survive a close) -- one fd leaks per mapped BAR, and
		 * mappings are not munmap()ed on the failure path below.
		 */
		if (map_addr == MAP_FAILED)
			goto fail;

		pci->mem_resource[i].addr = map_addr;
	}

	/* device is valid, add in list */
	if (ccp_add_device(ccp_dev, ccp_type)) {
		ccp_remove_device(ccp_dev);
		goto fail;
	}

	return 0;
fail:
	CCP_LOG_ERR("CCP Device probe failed");
	if (uio_fd >= 0)
		close(uio_fd);
	if (ccp_dev)
		rte_free(ccp_dev);
	return -1;
}

int
ccp_probe_devices(const struct rte_pci_id *ccp_id)
{
	int dev_cnt = 0;
	int ccp_type = 0;
	struct dirent *d;
	DIR *dir;
	int ret = 0;
	int module_idx = 0;
	uint16_t domain;
	uint8_t bus, devid, function;
	char dirname[PATH_MAX];

	module_idx = ccp_check_pci_uio_module();
	if (module_idx < 0)
		return -1;

	TAILQ_INIT(&ccp_list);
	dir = opendir(SYSFS_PCI_DEVICES);
	if (dir == NULL)
		return -1;
	while ((d = readdir(dir)) != NULL) {
		if (d->d_name[0] == '.')
			continue;
		if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
					      &domain, &bus, &devid, &function) != 0)
			continue;
Kumar snprintf(dirname, sizeof(dirname), "%s/%s", 798ef4b04f8SRavi Kumar SYSFS_PCI_DEVICES, d->d_name); 799ef4b04f8SRavi Kumar if (is_ccp_device(dirname, ccp_id, &ccp_type)) { 800ef4b04f8SRavi Kumar printf("CCP : Detected CCP device with ID = 0x%x\n", 801ef4b04f8SRavi Kumar ccp_id[ccp_type].device_id); 802ef4b04f8SRavi Kumar ret = ccp_probe_device(dirname, domain, bus, devid, 803ef4b04f8SRavi Kumar function, ccp_type); 804ef4b04f8SRavi Kumar if (ret == 0) 805ef4b04f8SRavi Kumar dev_cnt++; 806ef4b04f8SRavi Kumar } 807ef4b04f8SRavi Kumar } 808ef4b04f8SRavi Kumar closedir(dir); 809ef4b04f8SRavi Kumar return dev_cnt; 810ef4b04f8SRavi Kumar } 811