Lines Matching +full:iommu +full:- +full:addresses

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2012-2014 Matteo Landi
5 * Copyright (C) 2012-2016 Luigi Rizzo
6 * Copyright (C) 2012-2016 Giuseppe Lettieri
98 /* ---------------------------------------------------*/
103 struct lut_entry *lut; /* virt,phys addresses, objtotal entries */
113 /* ---------------------------------------------------*/
134 #define NMA_LOCK_INIT(n) NM_MTX_INIT((n)->nm_mtx)
135 #define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx)
136 #define NMA_LOCK(n) NM_MTX_LOCK((n)->nm_mtx)
137 #define NMA_SPINLOCK(n) NM_MTX_SPINLOCK((n)->nm_mtx)
138 #define NMA_UNLOCK(n) NM_MTX_UNLOCK((n)->nm_mtx)
177 int nm_grp; /* iommu group id */
197 rv = nmd->ops->nmd_get_lut(nmd, lut);
210 rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
223 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we
229 pa = nmd->ops->nmd_ofstophys(nmd, off);
238 if (nmd->active) {
245 return nmd->ops->nmd_config(nmd);
254 rv = nmd->ops->nmd_if_offset(nmd, off);
263 nmd->ops->nmd_delete(nmd);
270 struct netmap_mem_d *nmd = na->nm_mem;
273 nifp = nmd->ops->nmd_if_new(nmd, na, priv);
282 struct netmap_mem_d *nmd = na->nm_mem;
285 nmd->ops->nmd_if_delete(nmd, na, nif);
293 struct netmap_mem_d *nmd = na->nm_mem;
296 rv = nmd->ops->nmd_rings_create(nmd, na);
305 struct netmap_mem_d *nmd = na->nm_mem;
308 nmd->ops->nmd_rings_delete(nmd, na);
320 return nmd->nm_id;
325 nm_prinf("%s:%d mem[%d:%d] -> %d", func, line, (nmd)->nm_id, (nmd)->nm_grp, (nmd)->refcount);
338 nmd->refcount++;
349 last = (--nmd->refcount == 0);
362 if (nm_mem_check_group(nmd, na->pdev) < 0) {
371 nmd->active++;
373 nmd->lasterr = nmd->ops->nmd_finalize(nmd, na);
375 if (!nmd->lasterr && !(nmd->flags & NETMAP_MEM_NOMAP)) {
376 nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
380 lasterr = nmd->lasterr;
401 if (p->bitmap == NULL) {
403 n = (p->objtotal + 31) / 32;
404 p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n);
405 if (p->bitmap == NULL) {
407 p->name);
410 p->bitmap_slots = n;
412 memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0]));
415 p->objfree = 0;
421 for (j = 0; j < p->objtotal; j++) {
422 if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) {
424 nm_prinf("skipping %s %d", p->name, j);
427 p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) );
428 p->objfree++;
432 nm_prinf("%s free %u", p->name, p->objfree);
433 if (p->objfree == 0) {
435 nm_prerr("%s: no objects available", p->name);
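The bitmap code above (lines 403-428) packs one "free" bit per object into 32-bit words: object j lives in word j >> 5 at bit j & 31, and a pool with objtotal objects needs (objtotal + 31) / 32 words. A minimal sketch of that indexing, with invented helper names (not part of netmap_mem2.c, which open-codes the same arithmetic):

#include <stdint.h>

/* One bit per object, 32 objects per 32-bit word; a set bit means "free". */
static inline void
bm_mark_free(uint32_t *bitmap, uint32_t j)
{
	bitmap[j >> 5] |= 1U << (j & 31U);
}

static inline void
bm_mark_used(uint32_t *bitmap, uint32_t j)
{
	bitmap[j >> 5] &= ~(1U << (j & 31U));
}

static inline int
bm_is_free(const uint32_t *bitmap, uint32_t j)
{
	return (bitmap[j >> 5] & (1U << (j & 31U))) != 0;
}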
448 struct netmap_obj_pool *p = &nmd->pools[i];
458 if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
459 nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name);
463 nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
464 if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
468 * Removed shared-info --> is the bug still there? */
469 nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;
479 if (na->active_fds <= 0 && !(nmd->flags & NETMAP_MEM_NOMAP))
480 netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
481 if (nmd->active == 1) {
490 nmd->ops->nmd_deref(nmd, na);
492 nmd->active--;
494 nmd->lasterr = 0;
506 lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
508 lut->plut = lut->lut;
510 lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
511 lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
579 .nm_grp = -1,
580 .nm_numa_domain = -1,
619 .nm_grp = -1,
620 .nm_numa_domain = -1,
657 "Use NUMA-local memory for memory pools when possible");
669 id = scan->nm_id + 1;
672 scan = scan->next;
673 if (id != scan->nm_id) {
674 nmd->nm_id = id;
675 nmd->nm_grp = grp_id;
676 nmd->nm_numa_domain = domain;
677 nmd->prev = scan->prev;
678 nmd->next = scan;
679 scan->prev->next = nmd;
680 scan->prev = nmd;
682 nmd->refcount = 1;
699 ret = nm_mem_assign_id_locked(nmd, grp_id, -1);
709 nmd->prev->next = nmd->next;
710 nmd->next->prev = nmd->prev;
713 netmap_last_mem_d = nmd->prev;
715 nmd->prev = nmd->next = NULL;
726 if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
727 nmd->refcount++;
732 nmd = nmd->next;
744 * A VALE port can use a particular allocator through the vale-ctl -m option
754 if (nmd->nm_grp != id) {
756 nm_prerr("iommu group mismatch: %d vs %d",
757 nmd->nm_grp, id);
758 nmd->lasterr = err = ENOMEM;
819 p = nmd->pools;
821 for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
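The loop at line 821 walks the three pools, subtracting each pool's memtotal from the requested offset until the remainder falls inside one pool; the physical address then comes from that pool's per-object lookup table. A hedged sketch of the final translation step, assuming a vtophys()-style helper as used elsewhere in the file (the real function also has a Windows-specific variant):

/* Translate an offset known to fall inside pool p (off < p->memtotal)
 * into a physical address. Sketch only, not the file's exact code. */
static vm_paddr_t
pool_ofs_to_phys(struct netmap_obj_pool *p, vm_ooffset_t off)
{
	struct lut_entry *le = &p->lut[off / p->_objsize];

	return vtophys(le->vaddr) + off % p->_objsize;
}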
859 * 1 - allocate a Memory Descriptor List as wide as the sum
861 * 2 - cycle all the objects in every pool and for every object do
863 * 2a - cycle all the objects in every pool, get the list
865 * 2b - calculate the offset in the array of page descriptors in the
867 * 2c - copy the descriptors of the object into the main MDL
869 * 3 - return the resulting MDL that needs to be mapped in userland
896 struct netmap_obj_pool *p = &nmd->pools[i];
897 int clsz = p->_clustsize;
898 int clobjs = p->_clustentries; /* objects per cluster */
903 tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
912 for (j = 0; j < p->numclusters; j++, ofs += clsz) {
914 MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
915 MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
917 mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */
928 * Helper function for OS-specific mmap routines (currently only Windows).
940 if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
944 *clustsize = nmd->pools[pool]._clustsize;
945 *numclusters = nmd->pools[pool].numclusters;
958 if (nmd->flags & NETMAP_MEM_FINALIZED) {
959 *size = nmd->nm_totalsize;
964 struct netmap_obj_pool *p = nmd->pools + i;
965 *size += ((size_t)p->_numclusters * (size_t)p->_clustsize);
970 *memflags = nmd->flags;
972 *id = nmd->nm_id;
986 int i, k = p->_clustentries, n = p->objtotal;
989 for (i = 0; i < n; i += k, ofs += p->_clustsize) {
990 const char *base = p->lut[i].vaddr;
991 ssize_t relofs = (const char *) vaddr - base;
993 if (relofs < 0 || relofs >= p->_clustsize)
998 p->name, ofs, i, vaddr);
1002 vaddr, p->name);
1006 /* Helper functions which convert virtual addresses to offsets */
1008 netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))
1011 ((n)->pools[NETMAP_IF_POOL].memtotal + \
1012 netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))
1031 if (len > p->_objsize) {
1032 nm_prerr("%s request size %d too large", p->name, len);
1036 if (p->objfree == 0) {
1037 nm_prerr("no more %s objects", p->name);
1043 /* termination is guaranteed by p->free, but better check bounds on i */
1044 while (vaddr == NULL && i < p->bitmap_slots) {
1045 uint32_t cur = p->bitmap[i];
1054 p->bitmap[i] &= ~mask; /* mark object as in use */
1055 p->objfree--;
1057 vaddr = p->lut[i * 32 + j].vaddr;
1061 nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr);
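In the allocation path above, a non-zero bitmap word contains at least one free object; lines 1054-1057 clear one set bit and index the lookup table with word*32 + bit. A compact sketch of that inner step using ffs(), which the file's own loop does not necessarily use:

#include <strings.h>	/* ffs() */

/* Take one free object out of bitmap word i of pool p; returns the object
 * index, or -1 if the word has no free bit. Illustrative sketch only. */
static int
pick_free_obj(struct netmap_obj_pool *p, uint32_t i)
{
	uint32_t cur = p->bitmap[i];
	int j;

	if (cur == 0)
		return -1;
	j = ffs(cur) - 1;		/* lowest set bit */
	p->bitmap[i] &= ~(1U << j);	/* mark the object as in use */
	p->objfree--;
	return (int)(i * 32 + j);	/* index into p->lut[] */
}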
1078 if (j >= p->objtotal) {
1079 nm_prerr("invalid index %u, max %u", j, p->objtotal);
1082 ptr = &p->bitmap[j / 32];
1089 p->objfree++;
1101 u_int i, j, n = p->numclusters;
1103 for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
1104 void *base = p->lut[i * p->_clustentries].vaddr;
1105 ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;
1108 if (base == NULL || vaddr < base || relofs >= p->_clustsize)
1111 j = j + relofs / p->_objsize;
1117 vaddr, p->name);
1123 return nmd->pools[NETMAP_BUF_POOL]._objsize;
1126 #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
1127 #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
1128 #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
1129 #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
1131 netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)
1137 (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
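The macros above convert between virtual addresses, region offsets, and buffer indices. The underlying layout is the IF pool, then the RING pool, then the BUF pool, with each buffer occupying _objsize bytes. A short sketch of the reverse direction, the region offset of buffer idx, with an invented helper name (hedged: layout as implied by lines 1011-1012 and 1137):

/* Region offset of buffer 'idx': skip the IF and RING pools, then idx
 * buffers in. Sketch only; it inverts the index macro at line 1137. */
static size_t
buf_region_offset(struct netmap_mem_d *nmd, uint32_t idx)
{
	return nmd->pools[NETMAP_IF_POOL].memtotal +
	       nmd->pools[NETMAP_RING_POOL].memtotal +
	       (size_t)idx * nmd->pools[NETMAP_BUF_POOL]._objsize;
}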
1147 struct netmap_mem_d *nmd = na->nm_mem;
1161 nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
1173 struct lut_entry *lut = na->na_lut.lut;
1174 struct netmap_mem_d *nmd = na->nm_mem;
1175 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1179 for (i = 0; head >=2 && head < p->objtotal; i++) {
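The loop header at line 1179 walks a chain of "extra" buffers: the first uint32_t stored in each buffer holds the index of the next one (see the "allocate buffer %d -> %d" trace at line 1161), and an index below 2 or beyond objtotal ends the chain. A hedged sketch of that walk, with an invented helper name:

/* Count the buffers in an extra-buffer chain starting at 'head'.
 * Each buffer's first uint32_t is the index of the next buffer;
 * indices 0 and 1 (and out-of-range values) terminate the chain.
 * Illustrative sketch, not the file's code. */
static u_int
count_extra_bufs(struct lut_entry *lut, struct netmap_obj_pool *p, uint32_t head)
{
	u_int n = 0;

	while (head >= 2 && head < p->objtotal) {
		head = *(uint32_t *)lut[head].vaddr;	/* follow the link */
		n++;
	}
	return n;
}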
1198 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1200 uint32_t pos = 0; /* slot in p->bitmap */
1210 slot[i].len = p->_objsize;
1215 nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
1220 i--;
1230 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1235 slot[i].len = p->_objsize;
1244 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1246 if (i < 2 || i >= p->objtotal) {
1247 nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
1264 p->name, p->objfree);
1273 if (p->bitmap)
1274 nm_os_free(p->bitmap);
1275 p->bitmap = NULL;
1276 if (p->invalid_bitmap)
1277 nm_os_free(p->invalid_bitmap);
1278 p->invalid_bitmap = NULL;
1279 if (!p->alloc_done) {
1285 if (p->lut) {
1291 * addresses are stored at multiples of p->_clustentries
1294 for (i = 0; i < p->objtotal; i += p->_clustentries) {
1295 free(p->lut[i].vaddr, M_NETMAP);
1297 nm_free_lut(p->lut, p->objtotal);
1299 p->lut = NULL;
1300 p->objtotal = 0;
1301 p->memtotal = 0;
1302 p->numclusters = 0;
1303 p->objfree = 0;
1304 p->alloc_done = 0;
1325 * XXX note -- userspace needs the buffers to be contiguous,
1340 p->r_objtotal = objtotal;
1341 p->r_objsize = objsize;
1351 i = (objsize & (LINE_ROUND - 1));
1353 nm_prinf("aligning object by %d bytes", LINE_ROUND - i);
1354 objsize += LINE_ROUND - i;
1356 if (objsize < p->objminsize || objsize > p->objmaxsize) {
1358 objsize, p->objminsize, p->objmaxsize);
1361 if (objtotal < p->nummin || objtotal > p->nummax) {
1363 objtotal, p->nummin, p->nummax);
1367 * Compute number of objects using a brute-force approach:
1397 p->_clustentries = clustentries;
1398 p->_clustsize = clustsize;
1399 p->_numclusters = (objtotal + clustentries - 1) / clustentries;
1402 p->_objsize = objsize;
1403 p->_objtotal = p->_numclusters * clustentries;
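Lines 1397-1403 round the requested object count up to a whole number of clusters, so the pool can end up slightly larger than asked for. A worked example with made-up numbers (not the driver's defaults):

/* Example: objsize = 2048, clustsize = 4096  =>  clustentries = 2.
 * Requesting objtotal = 1001 objects gives:
 *   _numclusters = (1001 + 2 - 1) / 2 = 501
 *   _objtotal    = 501 * 2            = 1002      (one more than requested)
 *   memtotal     = 501 * 4096         = 2052096 bytes (about 2 MiB)
 */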
1414 if (p->lut) {
1426 p->numclusters = p->_numclusters;
1427 p->objtotal = p->_objtotal;
1428 p->alloc_done = 1;
1430 p->lut = nm_alloc_lut(p->objtotal);
1431 if (p->lut == NULL) {
1432 nm_prerr("Unable to create lookup table for '%s'", p->name);
1440 for (i = 0; i < (int)p->objtotal;) {
1441 int lim = i + p->_clustentries;
1451 if (nmd->nm_numa_domain == -1) {
1452 clust = contigmalloc(p->_clustsize, M_NETMAP,
1453 M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0);
1457 ds = DOMAINSET_PREF(nmd->nm_numa_domain);
1458 clust = contigmalloc_domainset(p->_clustsize, M_NETMAP,
1459 ds, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0);
1467 i, p->name);
1471 for (i--; i >= lim; i--) {
1472 if (i % p->_clustentries == 0 && p->lut[i].vaddr)
1473 free(p->lut[i].vaddr, M_NETMAP);
1474 p->lut[i].vaddr = NULL;
1477 p->objtotal = i;
1479 p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
1490 * of p->_objsize.
1492 for (; i < lim; i++, clust += p->_objsize) {
1493 p->lut[i].vaddr = clust;
1495 p->lut[i].paddr = vtophys(clust);
1499 p->memtotal = (size_t)p->numclusters * (size_t)p->_clustsize;
1501 nm_prinf("Pre-allocated %d clusters (%d/%zuKB) for '%s'",
1502 p->numclusters, p->_clustsize >> 10,
1503 p->memtotal >> 10, p->name);
1536 netmap_reset_obj_allocator(&nmd->pools[i]);
1538 nmd->flags &= ~NETMAP_MEM_FINALIZED;
1544 int i, lim = p->objtotal;
1546 if (na == NULL || na->pdev == NULL)
1549 lut = &na->na_lut;
1565 nm_prdis("unmapping and freeing plut for %s", na->name);
1566 if (lut->plut == NULL || na->pdev == NULL)
1568 for (i = 0; i < lim; i += p->_clustentries) {
1569 if (lut->plut[i].paddr)
1570 netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
1572 nm_free_plut(lut->plut);
1573 lut->plut = NULL;
1583 int i, lim = p->objtotal;
1584 struct netmap_lut *lut = &na->na_lut;
1586 if (na->pdev == NULL)
1602 if (lut->plut != NULL) {
1603 nm_prdis("plut already allocated for %s", na->name);
1607 nm_prdis("allocating physical lut for %s", na->name);
1608 lut->plut = nm_alloc_plut(lim);
1609 if (lut->plut == NULL) {
1610 nm_prerr("Failed to allocate physical lut for %s", na->name);
1614 for (i = 0; i < lim; i += p->_clustentries) {
1615 lut->plut[i].paddr = 0;
1618 for (i = 0; i < lim; i += p->_clustentries) {
1621 if (p->lut[i].vaddr == NULL)
1624 error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
1625 p->lut[i].vaddr, p->_clustsize);
1627 nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name);
1631 for (j = 1; j < p->_clustentries; j++) {
1632 lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
1648 if (nmd->flags & NETMAP_MEM_FINALIZED)
1650 nmd->lasterr = 0;
1651 nmd->nm_totalsize = 0;
1653 nmd->lasterr = netmap_finalize_obj_allocator(nmd, &nmd->pools[i]);
1654 if (nmd->lasterr)
1656 nmd->nm_totalsize += nmd->pools[i].memtotal;
1658 nmd->nm_totalsize = (nmd->nm_totalsize + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
1659 nmd->lasterr = netmap_mem_init_bitmaps(nmd);
1660 if (nmd->lasterr)
1663 nmd->flags |= NETMAP_MEM_FINALIZED;
1667 nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
1668 nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
1669 nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
1672 nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
1678 return nmd->lasterr;
1704 d->ops = ops;
1709 snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);
1712 snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
1714 d->name);
1718 nm_prerr("%s: request too large", d->pools[i].name);
1722 memtotal -= poolsz;
1724 d->params[i].num = p[i].num;
1725 d->params[i].size = p[i].size;
1728 uint64_t sz = d->params[NETMAP_BUF_POOL].size;
1729 uint64_t n = (memtotal + sz - 1) / sz;
1734 d->pools[NETMAP_BUF_POOL].name,
1737 d->params[NETMAP_BUF_POOL].num += n;
1747 d->flags &= ~NETMAP_MEM_FINALIZED;
1816 d = _netmap_mem_private_new(sizeof(*d), p, -1, &netmap_mem_global_ops, 0, perr);
1821 /* Reference the IOMMU- and NUMA-local allocator: find an existing one or create a new one;
1822 * for non-hw adapters, fall back to the global allocator.
1830 if (na == NULL || na->pdev == NULL)
1833 domain = nm_numa_domain(na->pdev);
1834 grp_id = nm_iommu_group_id(na->pdev);
1839 if (!(nmd->flags & NETMAP_MEM_HIDDEN) &&
1840 nmd->nm_grp == grp_id && nmd->nm_numa_domain == domain) {
1841 nmd->refcount++;
1846 nmd = nmd->next;
1859 snprintf(nmd->name, sizeof(nmd->name), "%d", nmd->nm_id);
1862 snprintf(nmd->pools[i].name, NETMAP_POOL_MAX_NAMSZ, "%s-%s",
1863 nm_mem_blueprint.pools[i].name, nmd->name);
1884 if (!netmap_mem_params_changed(nmd->params))
1889 if (nmd->flags & NETMAP_MEM_FINALIZED) {
1892 netmap_reset_obj_allocator(&nmd->pools[i]);
1894 nmd->flags &= ~NETMAP_MEM_FINALIZED;
1898 nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
1899 nmd->params[i].num, nmd->params[i].size);
1900 if (nmd->lasterr)
1906 return nmd->lasterr;
1912 if (nmd->flags & NETMAP_MEM_FINALIZED)
1918 nmd->lasterr = 0;
1921 return nmd->lasterr;
1930 netmap_destroy_obj_allocator(&nmd->pools[i]);
1966 return kring->ring == NULL &&
1967 (kring->users > 0 ||
1968 (kring->nr_kflags & NKR_NEEDRING));
1974 return kring->ring != NULL &&
1975 kring->users == 0 &&
1976 !(kring->nr_kflags & NKR_NEEDRING);
1997 struct netmap_ring *ring = kring->ring;
2004 kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
2008 nm_prinf("creating %s", kring->name);
2009 ndesc = kring->nkr_num_slots;
2018 kring->ring = ring;
2019 *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
2020 *(int64_t *)(uintptr_t)&ring->buf_ofs =
2021 (nmd->pools[NETMAP_IF_POOL].memtotal +
2022 nmd->pools[NETMAP_RING_POOL].memtotal) -
2026 ring->head = kring->rhead;
2027 ring->cur = kring->rcur;
2028 ring->tail = kring->rtail;
2029 *(uint32_t *)(uintptr_t)&ring->nr_buf_size =
2031 nm_prdis("%s h %d c %d t %d", kring->name,
2032 ring->head, ring->cur, ring->tail);
2034 if (!(kring->nr_kflags & NKR_FAKERING)) {
2037 nm_prinf("allocating buffers for %s", kring->name);
2038 if (netmap_new_bufs(nmd, ring->slot, ndesc)) {
2045 nm_prinf("NOT allocating buffers for %s", kring->name);
2046 netmap_mem_set_ring(nmd, ring->slot, ndesc, 0);
2049 *(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
2050 *(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
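Lines 2020-2022 set ring->buf_ofs to the distance from the ring itself to the start of the buffer pool, and line 2029 records nr_buf_size; userspace combines the two with a slot's buf_idx to reach the packet buffer. A sketch of that consumer-side arithmetic, shown here only for illustration (it corresponds to the NETMAP_BUF() macro in the userspace header):

/* Address of the buffer backing slot 'i' of 'ring', using only the fields
 * initialized above. Sketch of the userspace-side computation. */
static inline char *
ring_slot_buf(struct netmap_ring *ring, unsigned int i)
{
	return (char *)ring + ring->buf_ofs +
	       (size_t)ring->slot[i].buf_idx * ring->nr_buf_size;
}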
2057 /* we cannot actually cleanup here, since we don't own kring->users
2058 * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement
2059 * the first or zero-out the second, then call netmap_free_rings()
2075 struct netmap_ring *ring = kring->ring;
2080 kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
2084 nm_prinf("deleting ring %s", kring->name);
2085 if (!(kring->nr_kflags & NKR_FAKERING)) {
2086 nm_prdis("freeing bufs for %s", kring->name);
2087 netmap_free_bufs(nmd, ring->slot, kring->nkr_num_slots);
2089 nm_prdis("NOT freeing bufs for %s", kring->name);
2092 kring->ring = NULL;
2099 * Allocate the per-fd structure netmap_if.
2131 /* initialize base fields -- override const */
2132 *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
2133 *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
2134 *(u_int *)(uintptr_t)&nifp->ni_host_tx_rings =
2135 (na->num_host_tx_rings ? na->num_host_tx_rings : 1);
2136 *(u_int *)(uintptr_t)&nifp->ni_host_rx_rings =
2137 (na->num_host_rx_rings ? na->num_host_rx_rings : 1);
2138 strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name));
2151 if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
2152 && i < priv->np_qlast[NR_TX]) {
2154 na->tx_rings[i]->ring) - base;
2156 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
2163 if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX]
2164 && i < priv->np_qlast[NR_RX]) {
2166 na->rx_rings[i]->ring) - base;
2168 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
2181 if (nifp->ni_bufs_head)
2182 netmap_extra_free(na, nifp->ni_bufs_head);
2191 nm_prinf("active = %d", nmd->active);
2216 ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
2217 &req->nr_mem_id);
2223 req->nr_if_pool_offset = 0;
2224 req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
2225 req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;
2227 req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
2228 req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
2229 req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;
2231 req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
2232 nmd->pools[NETMAP_RING_POOL].memtotal;
2233 req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
2234 req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
2254 netmap_mem_ext_list->prev = e;
2255 e->next = netmap_mem_ext_list;
2257 e->prev = NULL;
2265 if (e->prev)
2266 e->prev->next = e->next;
2268 netmap_mem_ext_list = e->next;
2269 if (e->next)
2270 e->next->prev = e->prev;
2271 e->prev = e->next = NULL;
2280 for (e = netmap_mem_ext_list; e; e = e->next) {
2281 if (nm_os_extmem_isequal(e->os, os)) {
2282 netmap_mem_get(&e->up);
2301 struct netmap_obj_pool *p = &d->pools[i];
2303 if (p->lut) {
2304 nm_free_lut(p->lut, p->objtotal);
2305 p->lut = NULL;
2308 if (e->os)
2309 nm_os_extmem_delete(e->os);
2346 if (pi->nr_if_pool_objtotal == 0)
2347 pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
2348 if (pi->nr_if_pool_objsize == 0)
2349 pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
2350 if (pi->nr_ring_pool_objtotal == 0)
2351 pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
2352 if (pi->nr_ring_pool_objsize == 0)
2353 pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
2354 if (pi->nr_buf_pool_objtotal == 0)
2355 pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
2356 if (pi->nr_buf_pool_objsize == 0)
2357 pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
2360 pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
2361 pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
2362 pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);
2373 return &nme->up;
2381 { pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
2382 { pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
2383 { pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
2384 -1,
2386 pi->nr_memsize,
2396 nme->os = os;
2399 clust = nm_os_extmem_nextpage(nme->os);
2402 struct netmap_obj_pool *p = &nme->up.pools[i];
2403 struct netmap_obj_params *o = &nme->up.params[i];
2405 p->_objsize = o->size;
2406 p->_clustsize = o->size;
2407 p->_clustentries = 1;
2409 p->lut = nm_alloc_lut(o->num);
2410 if (p->lut == NULL) {
2415 p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
2416 p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
2417 if (p->invalid_bitmap == NULL) {
2423 p->objtotal = 0;
2424 p->memtotal = 0;
2425 p->objfree = 0;
2429 for (j = 0; j < o->num && nr_pages > 0; j++) {
2432 p->lut[j].vaddr = clust + off;
2434 p->lut[j].paddr = vtophys(p->lut[j].vaddr);
2436 nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
2437 noff = off + p->_objsize;
2445 noff -= PAGE_SIZE;
2446 clust = nm_os_extmem_nextpage(nme->os);
2447 nr_pages--;
2450 if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
2457 p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U);
2465 p->objtotal = j;
2466 p->numclusters = p->objtotal;
2467 p->memtotal = j * (size_t)p->_objsize;
2468 nm_prdis("%d memtotal %zu", j, p->memtotal);
2473 return &nme->up;
2476 netmap_mem_put(&nme->up);
2522 ptif->ifp = ifp;
2523 ptif->nifp_offset = nifp_offset;
2525 if (ptnmd->pt_ifs) {
2526 ptif->next = ptnmd->pt_ifs;
2528 ptnmd->pt_ifs = ptif;
2533 if_name(ptif->ifp), ptif->nifp_offset);
2545 for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2546 if (curr->ifp == ifp) {
2561 int ret = -1;
2565 for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2566 if (curr->ifp == ifp) {
2568 prev->next = curr->next;
2570 ptnmd->pt_ifs = curr->next;
2573 if_name(curr->ifp), curr->nifp_offset);
2591 if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
2595 *lut = ptnmd->buf_lut;
2605 error = nmd->ops->nmd_config(nmd);
2610 *size = nmd->nm_totalsize;
2612 *memflags = nmd->flags;
2614 *id = nmd->nm_id;
2626 /* if the offset is valid, just return csb->base_addr + off */
2627 paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
2654 if (nmd->flags & NETMAP_MEM_FINALIZED)
2657 if (ptnmd->ptn_dev == NULL) {
2662 /* Map memory through ptnetmap-memdev BAR. */
2663 error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
2664 &ptnmd->nm_addr, &mem_size);
2670 bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2672 nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2676 if (ptnmd->buf_lut.lut == NULL) {
2678 ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
2679 if (ptnmd->buf_lut.lut == NULL) {
2686 poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2688 vaddr = (char *)(ptnmd->nm_addr) + poolofs;
2689 paddr = ptnmd->nm_paddr + poolofs;
2692 ptnmd->buf_lut.lut[i].vaddr = vaddr;
2697 ptnmd->buf_lut.objtotal = nbuffers;
2698 ptnmd->buf_lut.objsize = bufsize;
2699 nmd->nm_totalsize = mem_size;
2704 * replication? maybe nmd->pools[] should not be
2706 nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
2707 nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;
2709 nmd->flags |= NETMAP_MEM_FINALIZED;
2719 if (nmd->active == 1 &&
2720 (nmd->flags & NETMAP_MEM_FINALIZED)) {
2721 nmd->flags &= ~NETMAP_MEM_FINALIZED;
2722 /* unmap ptnetmap-memdev memory */
2723 if (ptnmd->ptn_dev) {
2724 nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
2726 ptnmd->nm_addr = NULL;
2727 ptnmd->nm_paddr = 0;
2736 return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
2746 if (nmd->active > 0)
2747 nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
2762 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2764 nm_prerr("interface %s is not in passthrough", na->name);
2768 nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
2769 ptif->nifp_offset);
2780 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2782 nm_prerr("interface %s is not in passthrough", na->name);
2793 int i, error = -1;
2795 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2797 nm_prerr("interface %s is not in passthrough", na->name);
2803 nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
2805 struct netmap_kring *kring = na->tx_rings[i];
2806 if (kring->ring)
2808 kring->ring = (struct netmap_ring *)
2809 ((char *)nifp + nifp->ring_ofs[i]);
2812 struct netmap_kring *kring = na->rx_rings[i];
2813 if (kring->ring)
2815 kring->ring = (struct netmap_ring *)
2817 nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]);
2836 kring->ring = NULL;
2868 if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
2869 ((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
2871 mem->refcount++;
2875 scan = scan->next;
2894 ptnmd->up.ops = &netmap_mem_pt_guest_ops;
2895 ptnmd->host_mem_id = mem_id;
2896 ptnmd->pt_ifs = NULL;
2899 err = nm_mem_assign_id_locked(&ptnmd->up, -1, -1);
2903 ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
2904 ptnmd->up.flags |= NETMAP_MEM_IO;
2906 NMA_LOCK_INIT(&ptnmd->up);
2908 snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);
2911 return &ptnmd->up;
2913 netmap_mem_pt_guest_delete(&ptnmd->up);
2957 ptnmd->ptn_dev = ptn_dev;