/*
 * Copyright (C) 2012-2013 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* __FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 241723 2012-10-19 09:41:45Z glebius $"); */

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/socket.h>		/* sockaddrs */
#include <sys/sysctl.h>
#include <sys/bus.h>		/* bus_dmamap_* */

#include <vm/vm.h>		/* vtophys */
#include <vm/pmap.h>		/* vtophys */

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/netmap.h>

#include "netmap_kern.h"
#include "netmap_mem2.h"

#define NMA_LOCK_INIT(n)	lockinit(&(n)->nm_mtx, "netmap memory allocator lock", 0, 0)
#define NMA_LOCK_DESTROY(n)	lockuninit(&(n)->nm_mtx)
#define NMA_LOCK(n)		lockmgr(&(n)->nm_mtx, LK_EXCLUSIVE)
#define NMA_UNLOCK(n)		lockmgr(&(n)->nm_mtx, LK_RELEASE)

struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};


/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will each have their own allocator.
 */
static int netmap_mem_global_config(struct netmap_mem_d *nmd);
static int netmap_mem_global_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_global_deref(struct netmap_mem_d *nmd);
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name       = "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax     = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name       = "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax     = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name       = "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax     = 1000000,	/* one million! */
		},
	},
	.config   = netmap_mem_global_config,
	.finalize = netmap_mem_global_finalize,
	.deref    = netmap_mem_global_deref,
};
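
/*
 * The three pools of an allocator are exposed to userspace as one
 * contiguous region, laid out in pool order (a sketch, matching the
 * offset helpers used below):
 *
 *	[ netmap_if pool | netmap_ring pool | netmap_buf pool ]
 *
 * so a region offset is resolved by walking the pools and subtracting
 * each pool's memtotal; see netmap_mem_ofstophys() and the
 * netmap_*_offset() macros.
 */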

// XXX logically belongs to nm_mem
struct lut_entry *netmap_buffer_lut;	/* exported */

/* blueprint for the private memory allocators */
static int netmap_mem_private_config(struct netmap_mem_d *nmd);
static int netmap_mem_private_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_private_deref(struct netmap_mem_d *nmd);
const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name       = "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax     = 10,
		},
		[NETMAP_RING_POOL] = {
			.name       = "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax     = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name       = "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax     = 1000000,	/* one million! */
		},
	},
	.config   = netmap_mem_private_config,
	.finalize = netmap_mem_private_finalize,
	.deref    = netmap_mem_private_deref,

	.flags = NETMAP_MEM_PRIVATE,
};

/* memory allocator related sysctls */

#define STRINGIFY(x) #x


#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
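
/*
 * For illustration, the macro above is expected to expand to sysctl
 * knobs of the form dev.netmap.<pool>_size and dev.netmap.<pool>_num
 * (requested values, read-write), plus dev.netmap.<pool>_curr_size and
 * dev.netmap.<pool>_curr_num (current values, read-only), with <pool>
 * one of "if", "ring" and "buf".
 */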

/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	NMA_LOCK(nmd);
	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
		pa = p[i].lut[offset / p[i]._objsize].paddr +
		    offset % p[i]._objsize;
		NMA_UNLOCK(nmd);
		return pa;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
	    p[NETMAP_IF_POOL].memtotal,
	    p[NETMAP_IF_POOL].memtotal
		+ p[NETMAP_RING_POOL].memtotal,
	    p[NETMAP_IF_POOL].memtotal
		+ p[NETMAP_RING_POOL].memtotal
		+ p[NETMAP_BUF_POOL].memtotal);
	NMA_UNLOCK(nmd);
	return 0;	// XXX bad address
}

int
netmap_mem_get_info(struct netmap_mem_d *nmd, u_int *size, u_int *memflags)
{
	int error = 0;

	NMA_LOCK(nmd);
	error = nmd->config(nmd);
	if (error)
		goto out;
	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		*size = nmd->nm_totalsize;
	} else {
		int i;
		*size = 0;
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			struct netmap_obj_pool *p = nmd->pools + i;
			*size += (p->_numclusters * p->_clustsize);
		}
	}
	*memflags = nmd->flags;
out:
	NMA_UNLOCK(nmd);
	return error;
}

/*
 * We store objects by kernel address; this finds the offset of an
 * object within its pool, so that the value can be exported to
 * userspace.
 * Algorithm: scan until we find the cluster, then add the actual
 * offset inside the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->_clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper macros which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(n, v)					\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	(n)->pools[NETMAP_RING_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))


ssize_t
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr)
{
	ssize_t v;
	NMA_LOCK(nmd);
	v = netmap_if_offset(nmd, addr);
	NMA_UNLOCK(nmd);
	return v;
}
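
/*
 * Worked example for netmap_obj_offset() (illustrative numbers): in a
 * pool with _clustentries = 2 and _clustsize = 4096, an address that
 * falls 0x100 bytes into the third cluster (lut index 4) returns
 * ofs = 2 * 4096 + 0x100 = 0x2100. netmap_ring_offset() and
 * netmap_buf_offset() then add the memtotal of the preceding pools to
 * turn this into an offset within the whole exported region.
 */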

/*
 * Report the index, and use the start position as a hint;
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;		/* index in the bitmap */
	uint32_t mask, j;	/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("%s allocator: out of memory", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}


/*
 * Free by index, not by address. This is slow but is only used
 * for a small number of objects (rings, nifp).
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* The given address is outside the current cluster. */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}
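
/*
 * Bitmap convention used by the routines above: bit (j % 32) of word
 * (j / 32) is 1 while object j is free and 0 while it is in use, so
 * netmap_obj_malloc() clears the bit it picks and netmap_obj_free()
 * sets it back.
 */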

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], NETMAP_BDG_BUF_SIZE(n), _pos, _index)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;		/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		/* XXX setting flags=NS_BUF_CHANGED forces a pointer reload
		 * in the NIC ring. This is a hack that hides missing
		 * initializations in the drivers, and should go away.
		 */
		// slot[i].flags = NS_BUF_CHANGED;
	}

	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}


static void
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		kfree(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		u_int i;
		size_t sz = p->_clustsize;

		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
		kfree(p->lut, M_NETMAP);
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters that are multiples of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
		    objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
		    objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute the number of objects per cluster using a brute-force
	 * approach: given a max cluster size, we try to fill it with
	 * objects, keeping track of the wasted space to the next page
	 * boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
		    objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}
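
/*
 * Worked example (illustrative): with objsize = 2048 and 4 KiB pages,
 * the loop above finds an exact fit at i = 2 (2 * 2048 = 4096, no
 * waste), so clustentries = 2 and clustsize = 4096. A request for 1000
 * such objects then yields _numclusters = 500 and _objtotal = 1000.
 */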

/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i; /* must be signed */
	size_t n;

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;

	n = sizeof(struct lut_entry) * p->objtotal;
	p->lut = kmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = kmalloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
		    p->name);
		goto clean;
	}
	p->bitmap_slots = n;

	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	n = p->_clustsize;
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			for (i--; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
					    n, M_NETMAP);
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->objfree = p->objtotal;
	p->memtotal = p->numclusters * p->_clustsize;
	if (p->objfree == 0)
		goto clean;
	if (netmap_verbose)
		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/* call with lock held */
static int
netmap_memory_config_changed(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
		    nmd->pools[i].r_objtotal != netmap_params[i].num)
			return 1;
	}
	return 0;
}

static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	int i;
	D("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags &= ~NETMAP_MEM_FINALIZED;
}

static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	/* buffers 0 and 1 are reserved */
	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
	nmd->flags |= NETMAP_MEM_FINALIZED;

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
	    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
	    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);

	return 0;

error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}
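
/*
 * Note: buffers 0 and 1 are reserved above (bitmap[0] = ~3 marks them
 * as busy and objfree is decreased by 2), so they are never handed out
 * by netmap_obj_malloc(); consistently, netmap_free_buf() rejects any
 * index below 2.
 */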

void
netmap_mem_private_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	D("deleting %p", nmd);
	if (nmd->refcount > 0)
		D("bug: deleting mem allocator with refcount=%d!", nmd->refcount);
	D("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	kfree(nmd, M_DEVBUF);
}

static int
netmap_mem_private_config(struct netmap_mem_d *nmd)
{
	/* nothing to do, we are configured on creation
	 * and configuration never changes thereafter
	 */
	return 0;
}

static int
netmap_mem_private_finalize(struct netmap_mem_d *nmd)
{
	int err;
	NMA_LOCK(nmd);
	nmd->refcount++;
	err = netmap_mem_finalize_all(nmd);
	NMA_UNLOCK(nmd);
	return err;
}

static void
netmap_mem_private_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);
	if (--nmd->refcount <= 0)
		netmap_mem_reset_all(nmd);
	NMA_UNLOCK(nmd);
}
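
/*
 * Note on private allocators: dropping the last reference
 * (netmap_mem_private_deref()) releases the backing clusters via
 * netmap_mem_reset_all(), while the netmap_mem_d descriptor itself is
 * only freed by netmap_mem_private_delete().
 */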

struct netmap_mem_d *
netmap_mem_private_new(const char *name, u_int txr, u_int txd, u_int rxr, u_int rxd)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i;
	u_int maxd;

	d = kmalloc(sizeof(struct netmap_mem_d),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL)
		return NULL;

	*d = nm_blueprint;

	/* XXX the rest of the code assumes the stack rings are always present */
	txr++;
	rxr++;
	p[NETMAP_IF_POOL].size = sizeof(struct netmap_if) +
	    sizeof(ssize_t) * (txr + rxr);
	p[NETMAP_IF_POOL].num = 2;
	maxd = (txd > rxd) ? txd : rxd;
	p[NETMAP_RING_POOL].size = sizeof(struct netmap_ring) +
	    sizeof(struct netmap_slot) * maxd;
	p[NETMAP_RING_POOL].num = txr + rxr;
	p[NETMAP_BUF_POOL].size = 2048; /* XXX find a way to let the user choose this */
	p[NETMAP_BUF_POOL].num = rxr * (rxd + 2) + txr * (txd + 2);

	D("req if %d*%d ring %d*%d buf %d*%d",
	    p[NETMAP_IF_POOL].num,
	    p[NETMAP_IF_POOL].size,
	    p[NETMAP_RING_POOL].num,
	    p[NETMAP_RING_POOL].size,
	    p[NETMAP_BUF_POOL].num,
	    p[NETMAP_BUF_POOL].size);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		ksnprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
		    nm_blueprint.pools[i].name,
		    name);
		if (netmap_config_obj_allocator(&d->pools[i],
		    p[i].num, p[i].size))
			goto error;
	}

	d->flags &= ~NETMAP_MEM_FINALIZED;

	NMA_LOCK_INIT(d);

	return d;
error:
	netmap_mem_private_delete(d);
	return NULL;
}
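
/*
 * Illustrative use (hypothetical values): a virtual port with one tx
 * and one rx ring of 1024 slots each would get its own allocator with
 *
 *	nmd = netmap_mem_private_new("vale0:p", 1, 1024, 1, 1024);
 *
 * and release it with netmap_mem_private_delete(nmd) when the port
 * goes away.
 */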

/* call with lock held */
static int
netmap_mem_global_config(struct netmap_mem_d *nmd)
{
	int i;

	if (nmd->refcount)
		/* already in use, we cannot change the configuration */
		goto out;

	if (!netmap_memory_config_changed(nmd))
		goto out;

	D("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
		    netmap_params[i].num, netmap_params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:
	return nmd->lasterr;
}

static int
netmap_mem_global_finalize(struct netmap_mem_d *nmd)
{
	int err;

	NMA_LOCK(nmd);

	/* update configuration if changed */
	if (netmap_mem_global_config(nmd))
		goto out;

	nmd->refcount++;

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* may happen if config is not changed */
		ND("nothing to do");
		goto out;
	}

	if (netmap_mem_finalize_all(nmd))
		goto out;

	/* backward compatibility */
	netmap_buf_size = nmd->pools[NETMAP_BUF_POOL]._objsize;
	netmap_total_buffers = nmd->pools[NETMAP_BUF_POOL].objtotal;

	netmap_buffer_lut = nmd->pools[NETMAP_BUF_POOL].lut;
	netmap_buffer_base = nmd->pools[NETMAP_BUF_POOL].lut[0].vaddr;

	nmd->lasterr = 0;

out:
	if (nmd->lasterr)
		nmd->refcount--;
	err = nmd->lasterr;

	NMA_UNLOCK(nmd);

	return err;
}

int
netmap_mem_init(void)
{
	NMA_LOCK_INIT(&nm_mem);
	return (0);
}

void
netmap_mem_fini(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nm_mem.pools[i]);
	}
	NMA_LOCK_DESTROY(&nm_mem);
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	u_int i;
	if (!na->tx_rings)
		return;
	for (i = 0; i < na->num_tx_rings + 1; i++) {
		if (na->tx_rings[i].ring) {
			netmap_ring_free(na->nm_mem, na->tx_rings[i].ring);
			na->tx_rings[i].ring = NULL;
		}
	}
	for (i = 0; i < na->num_rx_rings + 1; i++) {
		if (na->rx_rings[i].ring) {
			netmap_ring_free(na->nm_mem, na->rx_rings[i].ring);
			na->rx_rings[i].ring = NULL;
		}
	}
}

/*
 * Call with NMA_LOCK held.
 *
 * Allocate netmap rings and buffers for this card.
 * The rings are contiguous, but have variable size.
 */
int
netmap_mem_rings_create(struct netmap_adapter *na)
{
	struct netmap_ring *ring;
	u_int len, ndesc;
	struct netmap_kring *kring;

	NMA_LOCK(na->nm_mem);

	for (kring = na->tx_rings; kring != na->rx_rings; kring++) { /* Transmit rings */
		ndesc = kring->nkr_num_slots;
		len = sizeof(struct netmap_ring) +
		    ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring");
			goto cleanup;
		}
		ND("txring at %p", ring);
		kring->ring = ring;
		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		ring->avail = kring->nr_hwavail;
		ring->cur = kring->nr_hwcur;
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
		    NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("initializing slots for txring");
		if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
			D("Cannot allocate buffers for tx_ring");
			goto cleanup;
		}
	}

	for ( ; kring != na->tailroom; kring++) { /* Receive rings */
		ndesc = kring->nkr_num_slots;
		len = sizeof(struct netmap_ring) +
		    ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring");
			goto cleanup;
		}
		ND("rxring at %p", ring);

		kring->ring = ring;
		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		ring->cur = kring->nr_hwcur;
		ring->avail = kring->nr_hwavail;
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
		    NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("initializing slots for rxring");
		if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
			D("Cannot allocate buffers for rx_ring");
			goto cleanup;
		}
	}

	NMA_UNLOCK(na->nm_mem);

	return 0;

cleanup:
	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);

	return ENOMEM;
}

void
netmap_mem_rings_delete(struct netmap_adapter *na)
{
	/* last instance, release bufs and rings */
	u_int i, lim;
	struct netmap_kring *kring;
	struct netmap_ring *ring;

	NMA_LOCK(na->nm_mem);

	for (kring = na->tx_rings; kring != na->tailroom; kring++) {
		ring = kring->ring;
		if (ring == NULL)
			continue;
		lim = kring->nkr_num_slots;
		for (i = 0; i < lim; i++)
			netmap_free_buf(na->nm_mem, ring->slot[i].buf_idx);
	}
	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);
}


/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 */
struct netmap_if *
netmap_mem_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ntx, nrx;

	/*
	 * verify whether the virtual port needs the stack ring
	 */
	ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
	nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 * For virtual rx rings we also allocate an array of
	 * pointers to assign to nkr_leases.
	 */

	NMA_LOCK(na->nm_mem);

	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(na->nm_mem, len);
	if (nifp == NULL) {
		NMA_UNLOCK(na->nm_mem);
		return NULL;
	}

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, (size_t)IFNAMSIZ);

	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
		    netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
		    netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;
	}

	NMA_UNLOCK(na->nm_mem);

	return (nifp);
}
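
/*
 * Illustrative view from userspace (not part of this file): once the
 * region is mmap()ed, a process can reach ring i through the offsets
 * stored above, e.g.
 *
 *	struct netmap_ring *r = (struct netmap_ring *)
 *		((char *)nifp + nifp->ring_ofs[i]);
 *
 * and buffer b of that ring as
 *
 *	char *buf = (char *)r + r->buf_ofs + b * r->nr_buf_size;
 *
 * which is what the nifp and ring fields initialized here are for.
 */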

void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	NMA_LOCK(na->nm_mem);

	netmap_if_free(na->nm_mem, nifp);

	NMA_UNLOCK(na->nm_mem);
}

static void
netmap_mem_global_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);

	nmd->refcount--;
	if (netmap_verbose)
		D("refcount = %d", nmd->refcount);

	NMA_UNLOCK(nmd);
}

int
netmap_mem_finalize(struct netmap_mem_d *nmd)
{
	return nmd->finalize(nmd);
}

void
netmap_mem_deref(struct netmap_mem_d *nmd)
{
	nmd->deref(nmd);
}
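
/*
 * Typical lifecycle, as seen from this allocator's entry points
 * (illustrative; the actual call sites live in the generic netmap
 * code): a registering process first causes netmap_mem_finalize(),
 * then netmap_mem_if_new() and netmap_mem_rings_create(); on close the
 * resources are returned with netmap_mem_rings_delete() and
 * netmap_mem_if_delete(), and the reference is dropped with
 * netmap_mem_deref().
 */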