Lines matching "+full:d +full:-" in libnetmap's nmport.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
52 nmport_push_cleanup(struct nmport_d *d, struct nmport_cleanup_d *c)
54 c->next = d->clist;
55 d->clist = c;
59 nmport_pop_cleanup(struct nmport_d *d)
63 top = d->clist;
64 d->clist = d->clist->next;
65 (*top->cleanup)(top, d);
66 nmctx_free(d->ctx, top);
69 void nmport_do_cleanup(struct nmport_d *d)
71 while (d->clist != NULL) {
72 nmport_pop_cleanup(d);
79 struct nmport_d *d;
82 d = nmctx_malloc(ctx, sizeof(*d));
83 if (d == NULL) {
87 memset(d, 0, sizeof(*d));
89 nmreq_header_init(&d->hdr, NETMAP_REQ_REGISTER, &d->reg);
91 d->ctx = ctx;
92 d->fd = -1;
95 return d;
107 nmport_delete(struct nmport_d *d)
109 nmctx_free(d->ctx, d);
113 nmport_extmem_cleanup(struct nmport_cleanup_d *c, struct nmport_d *d)
117 if (d->extmem == NULL)
120 nmreq_remove_option(&d->hdr, &d->extmem->nro_opt);
121 nmctx_free(d->ctx, d->extmem);
122 d->extmem = NULL;
127 nmport_extmem(struct nmport_d *d, void *base, size_t size)
129 struct nmctx *ctx = d->ctx;
132 if (d->register_done) {
133 nmctx_ferror(ctx, "%s: cannot set extmem of an already registered port", d->hdr.nr_name);
135 return -1;
138 if (d->extmem != NULL) {
139 nmctx_ferror(ctx, "%s: extmem already in use", d->hdr.nr_name);
141 return -1;
148 return -1;
151 d->extmem = nmctx_malloc(ctx, sizeof(*d->extmem));
152 if (d->extmem == NULL) {
153 nmctx_ferror(ctx, "%s: cannot allocate extmem option", d->hdr.nr_name);
156 return -1;
158 memset(d->extmem, 0, sizeof(*d->extmem));
159 d->extmem->nro_usrptr = (uintptr_t)base;
160 d->extmem->nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
161 d->extmem->nro_info.nr_memsize = size;
162 nmreq_push_option(&d->hdr, &d->extmem->nro_opt);
164 clnup->cleanup = nmport_extmem_cleanup;
165 nmport_push_cleanup(d, clnup);
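
The sketch below shows one way nmport_extmem() could be used: hand a caller-allocated region to a port after parsing but before registration. It is not part of the library; the function name use_extmem, the port name "vale0:1" and the 32 MiB size are placeholders, while nmport_prepare(), nmport_open_desc() and nmport_close() are the entry points that appear later in this listing.

#include <stdio.h>
#include <stdlib.h>
#include <libnetmap.h>

int
use_extmem(void)
{
	size_t sz = 32UL * 1024 * 1024;		/* arbitrary 32 MiB region */
	void *base = malloc(sz);
	struct nmport_d *d = NULL;

	if (base == NULL)
		return -1;
	d = nmport_prepare("vale0:1");		/* parse only, no register yet */
	if (d == NULL || nmport_extmem(d, base, sz) < 0 ||
	    nmport_open_desc(d) < 0) {
		nmport_close(d);		/* handles NULL and half-open states */
		free(base);
		return -1;
	}
	printf("%s: %u tx slots\n", d->hdr.nr_name, d->reg.nr_tx_slots);
	nmport_close(d);			/* pops the extmem cleanup as well */
	free(base);
	return 0;
}
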
177 struct nmport_d *d)
179 (void)d;
183 munmap(cc->p, cc->size);
187 nmport_extmem_from_file(struct nmport_d *d, const char *fname)
189 struct nmctx *ctx = d->ctx;
190 int fd = -1;
219 clnup->p = p;
220 clnup->size = mapsize;
221 clnup->up.cleanup = nmport_extmem_from_file_cleanup;
222 nmport_push_cleanup(d, &clnup->up);
224 if (nmport_extmem(d, p, mapsize) < 0)
233 if (clnup->p != MAP_FAILED)
234 nmport_pop_cleanup(d);
238 return -1;
242 nmport_extmem_getinfo(struct nmport_d *d)
244 if (d->extmem == NULL)
246 return &d->extmem->nro_info;
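
A related sketch, assuming the pools-info structure returned by nmport_extmem_getinfo() is struct nmreq_pools_info from net/netmap.h (its fields match the ones assigned above): map a file as external memory and enlarge the buffer pool before registering. Function name, path, port name and pool size are placeholders.

#include <net/netmap_user.h>
#include <libnetmap.h>

struct nmport_d *
open_on_memfile(const char *portname, const char *memfile)
{
	struct nmport_d *d = nmport_prepare(portname);
	struct nmreq_pools_info *pi;

	if (d == NULL)
		return NULL;
	if (nmport_extmem_from_file(d, memfile) < 0)
		goto fail;
	pi = nmport_extmem_getinfo(d);		/* non-NULL here: extmem is set */
	pi->nr_buf_pool_objtotal = 163840;	/* ask for a larger buffer pool */
	if (nmport_open_desc(d) < 0)
		goto fail;
	return d;
fail:
	nmport_close(d);
	return NULL;
}
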
256 struct nmport_d *d)
261 nmreq_remove_option(&d->hdr, &cc->opt->nro_opt);
262 nmctx_free(d->ctx, cc->opt);
266 nmport_offset(struct nmport_d *d, uint64_t initial,
269 struct nmctx *ctx = d->ctx;
277 return -1;
282 nmctx_ferror(ctx, "%s: cannot allocate offset option", d->hdr.nr_name);
285 return -1;
288 opt->nro_opt.nro_reqtype = NETMAP_REQ_OPT_OFFSETS;
289 opt->nro_offset_bits = bits;
290 opt->nro_initial_offset = initial;
291 opt->nro_max_offset = maxoff;
292 opt->nro_min_gap = mingap;
293 nmreq_push_option(&d->hdr, &opt->nro_opt);
295 clnup->up.cleanup = nmport_offset_cleanup;
296 clnup->opt = opt;
297 nmport_push_cleanup(d, &clnup->up);
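
A sketch of using nmport_offset(). Judging from the assignments above and the call at line 503, the parameters after the descriptor are the initial offset, the maximum offset, the number of offset bits (0 lets the kernel pick the width) and the minimum gap; the 128-byte headroom and the function name are arbitrary examples.

#include <libnetmap.h>

struct nmport_d *
open_with_headroom(const char *portname)
{
	struct nmport_d *d = nmport_prepare(portname);

	if (d == NULL)
		return NULL;
	/* reserve 128 bytes in front of every packet buffer */
	if (nmport_offset(d, 128, 128, 0, 0) < 0 || nmport_open_desc(d) < 0) {
		nmport_close(d);
		return NULL;
	}
	return d;
}
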
314 .default_key = -1, \
333 struct nmreq_opt_parser *o = k->option;
336 k->id = o->nr_keys;
337 ok = &o->keys[k->id];
338 ok->key = k->key;
339 ok->id = k->id;
340 ok->flags = k->flags;
341 o->nr_keys++;
342 if (ok->flags & NMREQ_OPTK_DEFAULT)
343 o->default_key = ok->id;
352 .id = -1, \
359 #define nmport_key(p, o, k) ((p)->keys[NPKEY_ID(o, k)])
360 #define nmport_defkey(p, o) ((p)->keys[NPOPT_DESC(o).default_key])
390 struct nmctx *ctx = p->ctx;
391 struct nmport_d *d = p->token;
397 return -1;
398 if (d->reg.nr_mem_id && d->reg.nr_mem_id != mem_id) {
400 mem_id, d->reg.nr_mem_id);
402 return -1;
404 d->reg.nr_mem_id = mem_id;
411 struct nmport_d *d;
415 d = p->token;
417 if (nmport_extmem_from_file(d, nmport_key(p, extmem, file)) < 0)
418 return -1;
420 pi = &d->extmem->nro_info;
423 const char *k = p->keys[i];
431 pi->nr_if_pool_objtotal = v;
433 pi->nr_if_pool_objsize = v;
435 pi->nr_ring_pool_objtotal = v;
437 pi->nr_ring_pool_objsize = v;
439 pi->nr_buf_pool_objtotal = v;
441 pi->nr_buf_pool_objsize = v;
450 struct nmport_d *d;
452 d = p->token;
456 d->reg.nr_tx_rings = nr_rings;
457 d->reg.nr_rx_rings = nr_rings;
461 d->reg.nr_host_tx_rings = nr_rings;
462 d->reg.nr_host_rx_rings = nr_rings;
466 d->reg.nr_tx_slots = nr_slots;
467 d->reg.nr_rx_slots = nr_slots;
470 d->reg.nr_tx_rings = atoi(nmport_key(p, conf, tx_rings));
473 d->reg.nr_rx_rings = atoi(nmport_key(p, conf, rx_rings));
476 d->reg.nr_host_tx_rings = atoi(nmport_key(p, conf, host_tx_rings));
479 d->reg.nr_host_rx_rings = atoi(nmport_key(p, conf, host_rx_rings));
482 d->reg.nr_tx_slots = atoi(nmport_key(p, conf, tx_slots));
485 d->reg.nr_rx_slots = atoi(nmport_key(p, conf, rx_slots));
493 struct nmport_d *d;
496 d = p->token;
503 return nmport_offset(d, initial, initial, bits, 0);
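
The conf and offset parsers above only translate option strings into fields of d->reg and into an nmport_offset() call, so the same configuration can be applied programmatically between nmport_prepare() and nmport_open_desc(). A sketch with arbitrary ring and slot counts (not every port type honors them):

#include <libnetmap.h>

struct nmport_d *
open_with_conf(const char *portname)
{
	struct nmport_d *d = nmport_prepare(portname);

	if (d == NULL)
		return NULL;
	d->reg.nr_tx_rings = 2;
	d->reg.nr_rx_rings = 2;
	d->reg.nr_tx_slots = 1024;
	d->reg.nr_rx_slots = 1024;
	if (nmport_open_desc(d) < 0) {
		nmport_close(d);
		return NULL;
	}
	return d;
}
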
512 for (p = nmport_opt_parsers; p != NULL; p = p->next) {
513 if (!strcmp(p->prefix, opt)) {
514 p->flags |= NMREQ_OPTF_DISABLED;
524 for (p = nmport_opt_parsers; p != NULL; p = p->next) {
525 if (!strcmp(p->prefix, opt)) {
526 p->flags &= ~NMREQ_OPTF_DISABLED;
531 return -1;
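
nmport_disable_option() and nmport_enable_option() toggle the NMREQ_OPTF_DISABLED flag on a parser selected by its prefix string, and nmport_enable_option() returns -1 for an unknown prefix. A sketch, assuming "extmem" is the prefix of the option parser declared above and "not-an-option" is a deliberately bogus name:

#include <stdio.h>
#include <libnetmap.h>

void
lock_down_options(void)
{
	/* refuse extmem requests embedded in port names from now on */
	nmport_disable_option("extmem");

	/* enabling an unknown prefix fails with -1 */
	if (nmport_enable_option("not-an-option") < 0)
		fprintf(stderr, "unknown option prefix\n");
}
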
536 nmport_parse(struct nmport_d *d, const char *ifname)
540 if (nmreq_header_decode(&scan, &d->hdr, d->ctx) < 0) {
545 if (nmreq_register_decode(&scan, &d->reg, d->ctx) < 0) {
550 if (nmreq_options_decode(scan, nmport_opt_parsers, d, d->ctx) < 0) {
556 nmport_undo_parse(d);
557 return -1;
561 nmport_undo_parse(struct nmport_d *d)
563 nmport_do_cleanup(d);
564 memset(&d->reg, 0, sizeof(d->reg));
565 memset(&d->hdr, 0, sizeof(d->hdr));
571 struct nmport_d *d;
574 d = nmport_new();
575 if (d == NULL)
579 if (nmport_parse(d, ifname) < 0)
582 return d;
585 nmport_undo_prepare(d);
590 nmport_undo_prepare(struct nmport_d *d)
592 if (d == NULL)
594 nmport_undo_parse(d);
595 nmport_delete(d);
599 nmport_register(struct nmport_d *d)
601 struct nmctx *ctx = d->ctx;
603 if (d->register_done) {
605 nmctx_ferror(ctx, "%s: already registered", d->hdr.nr_name);
606 return -1;
609 d->fd = open("/dev/netmap", O_RDWR);
610 if (d->fd < 0) {
615 if (ioctl(d->fd, NIOCCTRL, &d->hdr) < 0) {
619 nmreq_foreach_option(&d->hdr, o) {
620 if (o->nro_status) {
622 d->hdr.nr_name,
623 nmreq_option_name(o->nro_reqtype),
624 strerror(o->nro_status));
630 nmctx_ferror(ctx, "%s: %s", d->hdr.nr_name, strerror(errno));
634 d->register_done = 1;
639 nmport_undo_register(d);
640 return -1;
644 nmport_undo_register(struct nmport_d *d)
646 if (d->fd >= 0)
647 close(d->fd);
648 d->fd = -1;
649 d->register_done = 0;
652 /* lookup the mem_id in the mem-list: do a new mmap() if
656 nmport_mmap(struct nmport_d *d)
658 struct nmctx *ctx = d->ctx;
663 if (d->mmap_done) {
665 nmctx_ferror(ctx, "%s: already mapped", d->hdr.nr_name);
666 return -1;
669 if (!d->register_done) {
672 return -1;
677 for (m = ctx->mem_descs; m != NULL; m = m->next)
678 if (m->mem_id == d->reg.nr_mem_id)
688 if (d->extmem != NULL) {
689 m->mem = (void *)((uintptr_t)d->extmem->nro_usrptr);
690 m->size = d->extmem->nro_info.nr_memsize;
691 m->is_extmem = 1;
693 m->mem = mmap(NULL, d->reg.nr_memsize, PROT_READ|PROT_WRITE,
694 MAP_SHARED, d->fd, 0);
695 if (m->mem == MAP_FAILED) {
699 m->size = d->reg.nr_memsize;
701 m->mem_id = d->reg.nr_mem_id;
702 m->next = ctx->mem_descs;
703 if (ctx->mem_descs != NULL)
704 ctx->mem_descs->prev = m;
705 ctx->mem_descs = m;
707 m->refcount++;
711 d->mem = m;
713 d->nifp = NETMAP_IF(m->mem, d->reg.nr_offset);
715 num_tx = d->reg.nr_tx_rings + d->nifp->ni_host_tx_rings;
716 for (i = 0; i < num_tx && !d->nifp->ring_ofs[i]; i++)
718 d->cur_tx_ring = d->first_tx_ring = i;
719 for ( ; i < num_tx && d->nifp->ring_ofs[i]; i++)
721 d->last_tx_ring = i - 1;
723 num_rx = d->reg.nr_rx_rings + d->nifp->ni_host_rx_rings;
724 for (i = 0; i < num_rx && !d->nifp->ring_ofs[i + num_tx]; i++)
726 d->cur_rx_ring = d->first_rx_ring = i;
727 for ( ; i < num_rx && d->nifp->ring_ofs[i + num_tx]; i++)
729 d->last_rx_ring = i - 1;
731 d->mmap_done = 1;
737 nmport_undo_mmap(d);
738 return -1;
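
After a successful register and mmap, the ring index range computed above (first_tx_ring .. last_tx_ring, and the RX counterparts) can be walked directly with the helpers from net/netmap_user.h. A small sketch; dump_tx_space is just an illustrative name and nm_ring_space() comes from that header, not from this file:

#include <stdio.h>
#include <net/netmap_user.h>
#include <libnetmap.h>

void
dump_tx_space(struct nmport_d *d)
{
	unsigned int i;

	for (i = d->first_tx_ring; i <= d->last_tx_ring; i++) {
		struct netmap_ring *ring = NETMAP_TXRING(d->nifp, i);

		printf("tx ring %u: %u free slots\n", i, nm_ring_space(ring));
	}
}
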
742 nmport_undo_mmap(struct nmport_d *d)
745 struct nmctx *ctx = d->ctx;
747 m = d->mem;
751 m->refcount--;
752 if (m->refcount <= 0) {
753 if (!m->is_extmem && m->mem != MAP_FAILED)
754 munmap(m->mem, m->size);
756 if (m->next != NULL)
757 m->next->prev = m->prev;
758 if (m->prev != NULL)
759 m->prev->next = m->next;
761 ctx->mem_descs = m->next;
763 d->mem = NULL;
766 d->mmap_done = 0;
767 d->mem = NULL;
768 d->nifp = NULL;
769 d->first_tx_ring = 0;
770 d->last_tx_ring = 0;
771 d->first_rx_ring = 0;
772 d->last_rx_ring = 0;
773 d->cur_tx_ring = 0;
774 d->cur_rx_ring = 0;
778 nmport_open_desc(struct nmport_d *d)
780 if (nmport_register(d) < 0)
783 if (nmport_mmap(d) < 0)
788 nmport_undo_open_desc(d);
789 return -1;
793 nmport_undo_open_desc(struct nmport_d *d)
795 nmport_undo_mmap(d);
796 nmport_undo_register(d);
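
nmport_prepare() plus nmport_open_desc() is the split counterpart of nmport_open(): it leaves a window in which d->reg, or the option list, can be adjusted before the NIOCCTRL ioctl. A sketch; open_exclusive is an illustrative name and NR_EXCLUSIVE is assumed to be one of the nr_flags values defined in net/netmap.h, not something introduced by this file:

#include <net/netmap_user.h>
#include <libnetmap.h>

struct nmport_d *
open_exclusive(const char *portname)
{
	struct nmport_d *d = nmport_prepare(portname);	/* parse only */

	if (d == NULL)
		return NULL;
	d->reg.nr_flags |= NR_EXCLUSIVE;	/* tweak the request */
	if (nmport_open_desc(d) < 0) {		/* register + mmap */
		nmport_close(d);
		return NULL;
	}
	return d;
}
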
803 struct nmport_d *d;
806 d = nmport_prepare(ifname);
807 if (d == NULL)
811 if (nmport_open_desc(d) < 0)
814 return d;
817 nmport_close(d);
822 nmport_close(struct nmport_d *d)
824 if (d == NULL)
826 nmport_undo_open_desc(d);
827 nmport_undo_prepare(d);
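
The all-in-one path: a sketch of a receiver that opens a port, waits once for traffic, drains whatever arrived and closes the port again. rx_once is an illustrative name; NETMAP_RXRING, NETMAP_BUF, nm_ring_empty and nm_ring_next come from net/netmap_user.h.

#include <poll.h>
#include <stdio.h>
#include <net/netmap_user.h>
#include <libnetmap.h>

int
rx_once(const char *portname)
{
	struct nmport_d *d = nmport_open(portname);
	struct pollfd pfd;
	unsigned int i, rx = 0;

	if (d == NULL)
		return -1;
	pfd.fd = d->fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 2000) > 0) {
		for (i = d->first_rx_ring; i <= d->last_rx_ring; i++) {
			struct netmap_ring *ring = NETMAP_RXRING(d->nifp, i);

			while (!nm_ring_empty(ring)) {
				struct netmap_slot *slot = &ring->slot[ring->cur];

				printf("ring %u: %u bytes\n",
				    i, (unsigned int)slot->len);
				ring->head = ring->cur =
				    nm_ring_next(ring, ring->cur);
				rx++;
			}
		}
	}
	nmport_close(d);
	return (int)rx;
}
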
831 nmport_clone(struct nmport_d *d)
836 ctx = d->ctx;
838 if (d->extmem != NULL && !d->register_done) {
848 c->hdr = d->hdr;
850 c->hdr.nr_body = (uintptr_t)&c->reg;
852 c->hdr.nr_options = 0;
853 c->reg = d->reg; /* this also copies the mem_id */
854 /* put the new port in an un-registered, unmapped state */
855 c->fd = -1;
856 c->nifp = NULL;
857 c->register_done = 0;
858 c->mem = NULL;
859 c->extmem = NULL;
860 c->mmap_done = 0;
861 c->first_tx_ring = 0;
862 c->last_tx_ring = 0;
863 c->first_rx_ring = 0;
864 c->last_rx_ring = 0;
865 c->cur_tx_ring = 0;
866 c->cur_rx_ring = 0;
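
Since a clone keeps the parsed header and register body (including mem_id) but none of the open state, one plausible use is opening individual ring pairs of an already parsed port, for example one descriptor per worker thread. A sketch; open_one_ring is an illustrative name, and NR_REG_ONE_NIC plus the nr_mode/nr_ringid fields are assumed from net/netmap.h rather than shown in this file:

#include <net/netmap_user.h>
#include <libnetmap.h>

struct nmport_d *
open_one_ring(struct nmport_d *parent, unsigned int ring)
{
	struct nmport_d *c = nmport_clone(parent);

	if (c == NULL)
		return NULL;
	c->reg.nr_mode = NR_REG_ONE_NIC;	/* only ring pair 'ring' */
	c->reg.nr_ringid = (uint16_t)ring;
	if (nmport_open_desc(c) < 0) {
		nmport_close(c);
		return NULL;
	}
	return c;
}
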
872 nmport_inject(struct nmport_d *d, const void *buf, size_t size)
874 u_int c, n = d->last_tx_ring - d->first_tx_ring + 1,
875 ri = d->cur_tx_ring;
883 if (ri > d->last_tx_ring)
884 ri = d->first_tx_ring;
885 ring = NETMAP_TXRING(d->nifp, ri);
887 j = ring->cur;
888 while (rem > ring->nr_buf_size && j != ring->tail) {
889 rem -= ring->nr_buf_size;
892 if (j == ring->tail && rem > 0)
894 i = ring->cur;
896 idx = ring->slot[i].buf_idx;
897 ring->slot[i].len = ring->nr_buf_size;
898 ring->slot[i].flags = NS_MOREFRAG;
899 nm_pkt_copy(buf, NETMAP_BUF(ring, idx), ring->nr_buf_size);
901 buf = (char *)buf + ring->nr_buf_size;
903 idx = ring->slot[i].buf_idx;
904 ring->slot[i].len = rem;
905 ring->slot[i].flags = 0;
907 ring->head = ring->cur = nm_ring_next(ring, i);
908 d->cur_tx_ring = ri;
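
Finally, a transmit sketch built on nmport_inject(): it copies the frame into the first TX ring with enough room (presumably returning 0 when none has space) and only advances head/cur, so the sketch kicks the transmitter with an explicit NIOCTXSYNC ioctl. send_dummy_frame and the zeroed 60-byte broadcast frame are placeholders; NIOCTXSYNC comes from net/netmap.h.

#include <string.h>
#include <sys/ioctl.h>
#include <net/netmap_user.h>
#include <libnetmap.h>

int
send_dummy_frame(struct nmport_d *d)
{
	char frame[60];

	memset(frame, 0, sizeof(frame));	/* zeroed payload */
	memset(frame, 0xff, 6);			/* broadcast destination MAC */
	if (nmport_inject(d, frame, sizeof(frame)) == 0)
		return -1;			/* no TX ring had room */
	return ioctl(d->fd, NIOCTXSYNC, NULL);	/* flush the TX rings */
}
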