Lines Matching defs:nmd
192 netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
196 NMA_LOCK(nmd);
197 rv = nmd->ops->nmd_get_lut(nmd, lut);
198 NMA_UNLOCK(nmd);
204 netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size,
209 NMA_LOCK(nmd);
210 rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
211 NMA_UNLOCK(nmd);
217 netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
225 NMA_SPINLOCK(nmd);
227 NMA_LOCK(nmd);
229 pa = nmd->ops->nmd_ofstophys(nmd, off);
230 NMA_UNLOCK(nmd);
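
The matches above (netmap_mem_get_lut, netmap_mem_get_info, netmap_mem_ofstophys) all share one shape: take the per-allocator lock, dispatch through the nmd->ops function table, release the lock. A minimal sketch of that pattern follows; the struct names and the commented lock calls are illustrative stand-ins, not the kernel's definitions.

    /* Lock-and-dispatch wrapper pattern; all names are stand-ins. */
    struct xmem;                       /* opaque allocator */
    struct xmem_ops {
            int (*get_info)(struct xmem *, unsigned long *size);
    };
    struct xmem {
            const struct xmem_ops *ops;
            /* lock, pools, flags, ... */
    };

    static int
    xmem_get_info(struct xmem *m, unsigned long *size)
    {
            int rv;

            /* NMA_LOCK(m);   <- per-allocator lock in the real code */
            rv = m->ops->get_info(m, size);
            /* NMA_UNLOCK(m); */
            return rv;
    }
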
236 netmap_mem_config(struct netmap_mem_d *nmd)
238 if (nmd->active) {
245 return nmd->ops->nmd_config(nmd);
249 netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off)
253 NMA_LOCK(nmd);
254 rv = nmd->ops->nmd_if_offset(nmd, off);
255 NMA_UNLOCK(nmd);
261 netmap_mem_delete(struct netmap_mem_d *nmd)
263 nmd->ops->nmd_delete(nmd);
270 struct netmap_mem_d *nmd = na->nm_mem;
272 NMA_LOCK(nmd);
273 nifp = nmd->ops->nmd_if_new(nmd, na, priv);
274 NMA_UNLOCK(nmd);
282 struct netmap_mem_d *nmd = na->nm_mem;
284 NMA_LOCK(nmd);
285 nmd->ops->nmd_if_delete(nmd, na, nif);
286 NMA_UNLOCK(nmd);
293 struct netmap_mem_d *nmd = na->nm_mem;
295 NMA_LOCK(nmd);
296 rv = nmd->ops->nmd_rings_create(nmd, na);
297 NMA_UNLOCK(nmd);
305 struct netmap_mem_d *nmd = na->nm_mem;
307 NMA_LOCK(nmd);
308 nmd->ops->nmd_rings_delete(nmd, na);
309 NMA_UNLOCK(nmd);
318 netmap_mem_get_id(struct netmap_mem_d *nmd)
320 return nmd->nm_id;
324 #define NM_DBG_REFC(nmd, func, line) \
325 nm_prinf("%s:%d mem[%d:%d] -> %d", func, line, (nmd)->nm_id, (nmd)->nm_grp, (nmd)->refcount);
327 #define NM_DBG_REFC(nmd, func, line)
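
Lines 324-327 are the two variants of the refcount tracer: a logging body when memory debugging is compiled in, and an empty expansion otherwise, so the call sites at lines 339, 352, 683, 728 and 1842 cost nothing in production builds. Reconstructed shape; the guard macro name below is an assumption, not taken from this listing:

    #ifdef NM_DEBUG_MEM_REFC           /* hypothetical guard name */
    #define NM_DBG_REFC(nmd, func, line) \
            nm_prinf("%s:%d mem[%d:%d] -> %d", func, line, \
                (nmd)->nm_id, (nmd)->nm_grp, (nmd)->refcount);
    #else
    #define NM_DBG_REFC(nmd, func, line)   /* expands to nothing */
    #endif
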
335 __netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
338 nmd->refcount++;
339 NM_DBG_REFC(nmd, func, line);
341 return nmd;
345 __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
349 last = (--nmd->refcount == 0);
351 nm_mem_release_id(nmd);
352 NM_DBG_REFC(nmd, func, line);
355 netmap_mem_delete(nmd);
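
__netmap_mem_get() and __netmap_mem_put() are a plain get/put reference count: get bumps refcount and returns the object so calls can be chained; put decrements, releases the allocator id on the last reference, and finally deletes the allocator. A self-contained model of lines 338-355 (the real functions also hold a global lock and trace through NM_DBG_REFC):

    struct refobj { int refcount; };

    void refobj_destroy(struct refobj *);  /* netmap_mem_delete() stand-in */

    static struct refobj *
    refobj_get(struct refobj *o)
    {
            o->refcount++;
            return o;                  /* chainable, like __netmap_mem_get() */
    }

    static void
    refobj_put(struct refobj *o)
    {
            int last = (--o->refcount == 0);

            if (last)                  /* id release happens here too */
                    refobj_destroy(o);
    }
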
359 netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
362 if (nm_mem_check_group(nmd, na->pdev) < 0) {
366 NMA_LOCK(nmd);
368 if (netmap_mem_config(nmd))
371 nmd->active++;
373 nmd->lasterr = nmd->ops->nmd_finalize(nmd, na);
375 if (!nmd->lasterr && !(nmd->flags & NETMAP_MEM_NOMAP)) {
376 nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
380 lasterr = nmd->lasterr;
381 NMA_UNLOCK(nmd);
384 netmap_mem_deref(nmd, na);
443 netmap_mem_init_bitmaps(struct netmap_mem_d *nmd)
448 struct netmap_obj_pool *p = &nmd->pools[i];
458 if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
459 nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name);
463 nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
464 if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
469 nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;
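
Lines 458-469 permanently reserve buffer indices 0 and 1: objfree drops by 2, and the two low bits of the first bitmap word are cleared, since ~3U is all ones except bits 0 and 1. A runnable check of that bit arithmetic (the set-bit-means-free convention is inferred from this listing):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint32_t word0 = ~3U;      /* as assigned at file line 469 */

            assert((word0 & (1U << 0)) == 0);  /* buffer 0: reserved */
            assert((word0 & (1U << 1)) == 0);  /* buffer 1: reserved */
            assert((word0 & (1U << 2)) != 0);  /* buffer 2: free */
            printf("bitmap[0] = 0x%08" PRIx32 "\n", word0);  /* 0xfffffffc */
            return 0;
    }
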
475 netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
478 NMA_LOCK(nmd);
479 if (na->active_fds <= 0 && !(nmd->flags & NETMAP_MEM_NOMAP))
480 netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
481 if (nmd->active == 1) {
488 netmap_mem_init_bitmaps(nmd);
490 nmd->ops->nmd_deref(nmd, na);
492 nmd->active--;
494 nmd->lasterr = 0;
497 NMA_UNLOCK(nmd);
504 netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
506 lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
510 lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
511 lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
661 nm_mem_assign_id_locked(struct netmap_mem_d *nmd, int grp_id, int domain)
674 nmd->nm_id = id;
675 nmd->nm_grp = grp_id;
676 nmd->nm_numa_domain = domain;
677 nmd->prev = scan->prev;
678 nmd->next = scan;
679 scan->prev->next = nmd;
680 scan->prev = nmd;
681 netmap_last_mem_d = nmd;
682 nmd->refcount = 1;
683 NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
694 nm_mem_assign_id(struct netmap_mem_d *nmd, int grp_id)
699 ret = nm_mem_assign_id_locked(nmd, grp_id, -1);
707 nm_mem_release_id(struct netmap_mem_d *nmd)
709 nmd->prev->next = nmd->next;
710 nmd->next->prev = nmd->prev;
712 if (netmap_last_mem_d == nmd)
713 netmap_last_mem_d = nmd->prev;
715 nmd->prev = nmd->next = NULL;
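
nm_mem_assign_id_locked() (lines 677-681) and nm_mem_release_id() (lines 709-715) maintain a circular doubly-linked list of allocators: a new node is spliced in immediately before `scan` and becomes the recorded tail; release unlinks the node and, if it was the tail, moves the tail back one. A self-contained model of both splices:

    struct node { struct node *prev, *next; };

    static struct node *tail;          /* plays netmap_last_mem_d */

    static void
    insert_before(struct node *n, struct node *scan)
    {
            n->prev = scan->prev;      /* mirrors file lines 677-681 */
            n->next = scan;
            scan->prev->next = n;
            scan->prev = n;
            tail = n;
    }

    static void
    unlink_node(struct node *n)
    {
            n->prev->next = n->next;   /* mirrors file lines 709-715 */
            n->next->prev = n->prev;
            if (tail == n)
                    tail = n->prev;
            n->prev = n->next = NULL;
    }
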
721 struct netmap_mem_d *nmd;
724 nmd = netmap_last_mem_d;
726 if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
727 nmd->refcount++;
728 NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
730 return nmd;
732 nmd = nmd->next;
733 } while (nmd != netmap_last_mem_d);
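
The lookup at lines 724-733 walks that circular list exactly once with a do/while, starting and stopping at the tail; the same traversal recurs at lines 1837-1847 when searching by group and NUMA domain instead of by id. In the style of the model above, extended with an id and a hidden flag (the list is assumed non-empty, as in the kernel where the static nm_mem allocator is always present):

    struct idnode { struct idnode *prev, *next; int id, hidden; };

    static struct idnode *idtail;      /* plays netmap_last_mem_d */

    static struct idnode *
    find_by_id(int id)
    {
            struct idnode *n = idtail;

            do {
                    if (!n->hidden && n->id == id) {
                            /* the real code bumps refcount here */
                            return n;
                    }
                    n = n->next;
            } while (n != idtail);
            return NULL;
    }
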
739 nm_mem_check_group(struct netmap_mem_d *nmd, void *dev)
752 NMA_LOCK(nmd);
754 if (nmd->nm_grp != id) {
757 nmd->nm_grp, id);
758 nmd->lasterr = err = ENOMEM;
761 NMA_UNLOCK(nmd);
812 netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
819 p = nmd->pools;
876 win32_build_user_vm_map(struct netmap_mem_d* nmd)
883 if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
894 NMA_LOCK(nmd);
896 struct netmap_obj_pool *p = &nmd->pools[i];
905 NMA_UNLOCK(nmd);
921 NMA_UNLOCK(nmd);
929 * Given an nmd and a pool index, returns the cluster size and number of clusters.
931 It should be called under NMA_LOCK(nmd); otherwise the underlying info can change.
935 netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters)
937 if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR)
939 // NMA_LOCK_ASSERT(nmd);
940 if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
944 *clustsize = nmd->pools[pool]._clustsize;
945 *numclusters = nmd->pools[pool].numclusters;
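
netmap_mem2_get_pool_info() (lines 935-945) is a defensive accessor: it validates every pointer and the pool index, refuses to answer until the allocator is finalized (the geometry is not stable before that, which is also why the comment at line 931 asks for NMA_LOCK), and only then copies the two values out. Simplified shape; the types, flag value, and error return below are assumptions:

    enum { NPOOLS = 3 };
    struct ppool { unsigned clustsize, numclusters; };
    struct pmem  { unsigned flags; struct ppool pools[NPOOLS]; };
    #define PMEM_FINALIZED 0x1         /* stands in for NETMAP_MEM_FINALIZED */

    static int
    get_pool_info(const struct pmem *m, unsigned pool,
        unsigned *clustsize, unsigned *numclusters)
    {
            if (m == NULL || clustsize == NULL || numclusters == NULL ||
                pool >= NPOOLS)
                    return -1;         /* EINVAL in spirit */
            if (!(m->flags & PMEM_FINALIZED))
                    return -1;         /* geometry not stable yet */
            *clustsize = m->pools[pool].clustsize;
            *numclusters = m->pools[pool].numclusters;
            return 0;
    }
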
950 netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size,
954 error = netmap_mem_config(nmd);
958 if (nmd->flags & NETMAP_MEM_FINALIZED) {
959 *size = nmd->nm_totalsize;
964 struct netmap_obj_pool *p = nmd->pools + i;
970 *memflags = nmd->flags;
972 *id = nmd->nm_id;
1015 netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
1017 return netmap_if_offset(nmd, addr);
1121 netmap_mem_bufsize(struct netmap_mem_d *nmd)
1123 return nmd->pools[NETMAP_BUF_POOL]._objsize;
1147 struct netmap_mem_d *nmd = na->nm_mem;
1150 NMA_LOCK(nmd);
1155 uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
1165 NMA_UNLOCK(nmd);
1174 struct netmap_mem_d *nmd = na->nm_mem;
1175 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1196 netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
1198 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1204 void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
1228 netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
1230 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1242 netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
1244 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1255 netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
1261 netmap_free_buf(nmd, slot[i].buf_idx);
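
netmap_new_bufs() (lines 1196-1204) fills n ring slots with freshly allocated buffers; the standard idiom for such loops, and the one the matching netmap_free_buf()/netmap_free_bufs() pair supports, is all-or-nothing: if allocation i fails, free slots 0..i-1 and report ENOMEM. A sketch with stand-in helpers; alloc_buf/free_buf are not the kernel functions, and the rollback path itself is not visible in this listing:

    #include <errno.h>
    #include <stddef.h>

    struct slot { unsigned buf_idx, len, flags; };

    void *alloc_buf(unsigned *index);  /* netmap_buf_malloc() stand-in */
    void  free_buf(unsigned index);    /* netmap_free_buf() stand-in */

    static int
    new_bufs(struct slot *slot, unsigned n, unsigned bufsize)
    {
            unsigned i;

            for (i = 0; i < n; i++) {
                    if (alloc_buf(&slot[i].buf_idx) == NULL)
                            goto cleanup;
                    slot[i].len = bufsize;
                    slot[i].flags = 0;
            }
            return 0;
    cleanup:
            while (i-- > 0)            /* roll back what we got */
                    free_buf(slot[i].buf_idx);
            return ENOMEM;
    }
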
1410 netmap_finalize_obj_allocator(struct netmap_mem_d *nmd, struct netmap_obj_pool *p)
1451 if (nmd->nm_numa_domain == -1) {
1457 ds = DOMAINSET_PREF(nmd->nm_numa_domain);
1529 netmap_mem_reset_all(struct netmap_mem_d *nmd)
1534 nm_prinf("resetting %p", nmd);
1536 netmap_reset_obj_allocator(&nmd->pools[i]);
1538 nmd->flags &= ~NETMAP_MEM_FINALIZED;
1645 netmap_mem_finalize_all(struct netmap_mem_d *nmd)
1648 if (nmd->flags & NETMAP_MEM_FINALIZED)
1650 nmd->lasterr = 0;
1651 nmd->nm_totalsize = 0;
1653 nmd->lasterr = netmap_finalize_obj_allocator(nmd, &nmd->pools[i]);
1654 if (nmd->lasterr)
1656 nmd->nm_totalsize += nmd->pools[i].memtotal;
1658 nmd->nm_totalsize = (nmd->nm_totalsize + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
1659 nmd->lasterr = netmap_mem_init_bitmaps(nmd);
1660 if (nmd->lasterr)
1663 nmd->flags |= NETMAP_MEM_FINALIZED;
1667 nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
1668 nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
1669 nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
1672 nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
1677 netmap_mem_reset_all(nmd);
1678 return nmd->lasterr;
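
netmap_mem_finalize_all() sums each pool's memtotal and then, at line 1658, rounds the grand total up to a page boundary with the usual power-of-two mask trick, (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); on any pool failure it unwinds through netmap_mem_reset_all() and returns lasterr. The rounding, checked in isolation:

    #include <assert.h>
    #include <stdint.h>

    #define PG 4096ULL                 /* any power-of-two page size */

    static uint64_t
    round_up_page(uint64_t x)
    {
            return (x + PG - 1) & ~(PG - 1);   /* as at file line 1658 */
    }

    int
    main(void)
    {
            assert(round_up_page(0) == 0);
            assert(round_up_page(1) == PG);
            assert(round_up_page(10000) == 3 * PG);   /* 12288 */
            assert(round_up_page(2 * PG) == 2 * PG);  /* already aligned */
            return 0;
    }
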
1828 struct netmap_mem_d *nmd;
1837 nmd = netmap_last_mem_d;
1839 if (!(nmd->flags & NETMAP_MEM_HIDDEN) &&
1840 nmd->nm_grp == grp_id && nmd->nm_numa_domain == domain) {
1841 nmd->refcount++;
1842 NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
1844 return nmd;
1846 nmd = nmd->next;
1847 } while (nmd != netmap_last_mem_d);
1849 nmd = nm_os_malloc(sizeof(*nmd));
1850 if (nmd == NULL)
1853 *nmd = nm_mem_blueprint;
1855 err = nm_mem_assign_id_locked(nmd, grp_id, domain);
1859 snprintf(nmd->name, sizeof(nmd->name), "%d", nmd->nm_id);
1862 snprintf(nmd->pools[i].name, NETMAP_POOL_MAX_NAMSZ, "%s-%s",
1863 nm_mem_blueprint.pools[i].name, nmd->name);
1866 NMA_LOCK_INIT(nmd);
1869 return nmd;
1872 nm_os_free(nmd);
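
Lines 1849-1863 are the create path: once the list lookup fails, a new allocator is nm_os_malloc'd, initialized by a plain struct copy from nm_mem_blueprint, given a fresh id, and its pool names are derived as "<blueprint pool name>-<id>". The blueprint-copy idiom, modeled in userland (field layout and pool base names are assumptions):

    #include <stdio.h>
    #include <stdlib.h>

    struct bp_pool { char name[32]; };
    struct bp_mem  { int id; char name[16]; struct bp_pool pools[3]; };

    static const struct bp_mem blueprint = {
            .pools = { { "netmap_if" }, { "netmap_ring" }, { "netmap_buf" } },
    };

    static struct bp_mem *
    mem_create(int id)
    {
            struct bp_mem *m = malloc(sizeof(*m));

            if (m == NULL)
                    return NULL;
            *m = blueprint;            /* struct copy: *nmd = nm_mem_blueprint */
            m->id = id;
            snprintf(m->name, sizeof(m->name), "%d", id);
            for (int i = 0; i < 3; i++)
                    snprintf(m->pools[i].name, sizeof(m->pools[i].name),
                        "%s-%s", blueprint.pools[i].name, m->name);
            return m;
    }

    int
    main(void)
    {
            struct bp_mem *m = mem_create(7);

            if (m != NULL)
                    printf("%s\n", m->pools[2].name);  /* netmap_buf-7 */
            free(m);
            return 0;
    }
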
1880 netmap_mem2_config(struct netmap_mem_d *nmd)
1884 if (!netmap_mem_params_changed(nmd->params))
1889 if (nmd->flags & NETMAP_MEM_FINALIZED) {
1892 netmap_reset_obj_allocator(&nmd->pools[i]);
1894 nmd->flags &= ~NETMAP_MEM_FINALIZED;
1898 nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
1899 nmd->params[i].num, nmd->params[i].size);
1900 if (nmd->lasterr)
1906 return nmd->lasterr;
1910 netmap_mem2_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
1912 if (nmd->flags & NETMAP_MEM_FINALIZED)
1915 if (netmap_mem_finalize_all(nmd))
1918 nmd->lasterr = 0;
1921 return nmd->lasterr;
1925 netmap_mem2_delete(struct netmap_mem_d *nmd)
1930 netmap_destroy_obj_allocator(&nmd->pools[i]);
1933 NMA_LOCK_DESTROY(nmd);
1934 if (nmd != &nm_mem)
1935 nm_os_free(nmd);
1988 netmap_mem2_rings_create(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2012 ring = netmap_ring_malloc(nmd, len);
2021 (nmd->pools[NETMAP_IF_POOL].memtotal +
2022 nmd->pools[NETMAP_RING_POOL].memtotal) -
2023 netmap_ring_offset(nmd, ring);
2030 netmap_mem_bufsize(nmd);
2038 if (netmap_new_bufs(nmd, ring->slot, ndesc)) {
2046 netmap_mem_set_ring(nmd, ring->slot, ndesc, 0);
2067 netmap_mem2_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2087 netmap_free_bufs(nmd, ring->slot, kring->nkr_num_slots);
2091 netmap_ring_free(nmd, ring);
2106 netmap_mem2_if_new(struct netmap_mem_d *nmd,
2126 nifp = netmap_if_malloc(nmd, len);
2145 base = netmap_if_offset(nmd, nifp);
2153 ofs = netmap_ring_offset(nmd,
2165 ofs = netmap_ring_offset(nmd,
2175 netmap_mem2_if_delete(struct netmap_mem_d *nmd,
2183 netmap_if_free(nmd, nifp);
2187 netmap_mem2_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2191 nm_prinf("active = %d", nmd->active);
2212 struct netmap_mem_d *nmd)
2216 ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
2222 NMA_LOCK(nmd);
2224 req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
2225 req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;
2227 req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
2228 req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
2229 req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;
2231 req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
2232 nmd->pools[NETMAP_RING_POOL].memtotal;
2233 req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
2234 req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
2235 NMA_UNLOCK(nmd);
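
Lines 2224-2234 expose the fixed layout of a netmap memory region to userspace: the IF pool sits at offset 0, the RING pool starts right after it (nr_ring_pool_offset = IF memtotal), and the BUF pool after both (nr_buf_pool_offset = IF + RING memtotal). Each offset is the running sum of the preceding pools' sizes:

    #include <stdint.h>

    /* Offsets implied by file lines 2227 and 2231-2232. */
    struct layout { uint64_t if_off, ring_off, buf_off; };

    static struct layout
    pool_layout(uint64_t if_total, uint64_t ring_total)
    {
            struct layout l;

            l.if_off   = 0;                     /* IF pool leads the region */
            l.ring_off = if_total;              /* RING follows IF */
            l.buf_off  = if_total + ring_total; /* BUF follows both */
            return l;
    }
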
2314 netmap_mem_ext_config(struct netmap_mem_d *nmd)
2510 netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, if_t ifp,
2513 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2520 NMA_LOCK(nmd);
2530 NMA_UNLOCK(nmd);
2538 /* Called with NMA_LOCK(nmd) held. */
2540 netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, if_t ifp)
2542 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2556 netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, if_t ifp)
2558 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2563 NMA_LOCK(nmd);
2581 NMA_UNLOCK(nmd);
2587 netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
2589 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2591 if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
2600 netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
2605 error = nmd->ops->nmd_config(nmd);
2610 *size = nmd->nm_totalsize;
2612 *memflags = nmd->flags;
2614 *id = nmd->nm_id;
2622 netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
2624 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2633 netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
2642 netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2644 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2654 if (nmd->flags & NETMAP_MEM_FINALIZED)
2699 nmd->nm_totalsize = mem_size;
2704 * replication? maybe nmd->pools[] should not be
2706 nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
2707 nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;
2709 nmd->flags |= NETMAP_MEM_FINALIZED;
2715 netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2717 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2719 if (nmd->active == 1 &&
2720 (nmd->flags & NETMAP_MEM_FINALIZED)) {
2721 nmd->flags &= ~NETMAP_MEM_FINALIZED;
2732 netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
2734 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2740 netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
2742 if (nmd == NULL)
2745 nm_prinf("deleting %p", nmd);
2746 if (nmd->active > 0)
2747 nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
2749 nm_prinf("done deleting %p", nmd);
2750 NMA_LOCK_DESTROY(nmd);
2751 nm_os_free(nmd);
2755 netmap_mem_pt_guest_if_new(struct netmap_mem_d *nmd,
2758 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2762 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2775 netmap_mem_pt_guest_if_delete(struct netmap_mem_d * nmd,
2780 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2787 netmap_mem_pt_guest_rings_create(struct netmap_mem_d *nmd,
2790 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2795 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2826 netmap_mem_pt_guest_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2840 (void)nmd;
2924 struct netmap_mem_d *nmd;
2927 nmd = netmap_mem_pt_guest_find_memid(mem_id);
2928 if (nmd == NULL) {
2929 nmd = netmap_mem_pt_guest_create(mem_id);
2933 return nmd;
2949 struct netmap_mem_d *nmd;
2952 nmd = netmap_mem_pt_guest_get(mem_id);
2955 if (nmd) {
2956 ptnmd = (struct netmap_mem_ptg *)nmd;
2960 return nmd;
2969 struct netmap_mem_d *nmd;
2975 nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);
2977 if (nmd) {
2978 netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
2981 return nmd;