/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>

#include <rte_debug.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_hexdump.h>
#include <rte_errno.h>
#include <rte_memcpy.h>

#include "mbuf_log.h"

RTE_LOG_REGISTER_DEFAULT(mbuf_logtype, INFO);

/*
 * pktmbuf pool constructor, given as a callback function to
 * rte_mempool_create(), or called directly if using
 * rte_mempool_create_empty()/rte_mempool_populate()
 */
void
rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
{
	struct rte_pktmbuf_pool_private *user_mbp_priv, *mbp_priv;
	struct rte_pktmbuf_pool_private default_mbp_priv;
	uint16_t roomsz;

	RTE_ASSERT(mp->private_data_size >=
		   sizeof(struct rte_pktmbuf_pool_private));
	RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));

	/* if no structure is provided, assume no mbuf private area */
	user_mbp_priv = opaque_arg;
	if (user_mbp_priv == NULL) {
		memset(&default_mbp_priv, 0, sizeof(default_mbp_priv));
		if (mp->elt_size > sizeof(struct rte_mbuf))
			roomsz = mp->elt_size - sizeof(struct rte_mbuf);
		else
			roomsz = 0;
		default_mbp_priv.mbuf_data_room_size = roomsz;
		user_mbp_priv = &default_mbp_priv;
	}

	RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
		((user_mbp_priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) ?
			sizeof(struct rte_mbuf_ext_shared_info) :
			user_mbp_priv->mbuf_data_room_size) +
		user_mbp_priv->mbuf_priv_size);
	RTE_ASSERT((user_mbp_priv->flags &
		    ~RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) == 0);

	mbp_priv = rte_mempool_get_priv(mp);
	memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv));
}
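
/*
 * Illustrative sketch (not part of the upstream file): the manual setup
 * path described above, calling rte_pktmbuf_pool_init() directly after
 * rte_mempool_create_empty(). The pool name and sizes are hypothetical.
 *
 * @code
 *	struct rte_pktmbuf_pool_private priv = {
 *		.mbuf_data_room_size = RTE_MBUF_DEFAULT_BUF_SIZE,
 *		.mbuf_priv_size = 0,
 *	};
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("my_pool", 8192,
 *		sizeof(struct rte_mbuf) + RTE_MBUF_DEFAULT_BUF_SIZE,
 *		256, sizeof(priv), SOCKET_ID_ANY, 0);
 *	if (mp != NULL) {
 *		rte_pktmbuf_pool_init(mp, &priv);
 *		rte_mempool_populate_default(mp);
 *		rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
 *	}
 * @endcode
 */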

/*
 * pktmbuf constructor, given as a callback function to
 * rte_mempool_obj_iter() or rte_mempool_create().
 * Set the fields of a packet mbuf to their default values.
 */
void
rte_pktmbuf_init(struct rte_mempool *mp,
		 __rte_unused void *opaque_arg,
		 void *_m,
		 __rte_unused unsigned i)
{
	struct rte_mbuf *m = _m;
	uint32_t mbuf_size, buf_len, priv_size;

	RTE_ASSERT(mp->private_data_size >=
		   sizeof(struct rte_pktmbuf_pool_private));

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;
	buf_len = rte_pktmbuf_data_room_size(mp);

	RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
	RTE_ASSERT(mp->elt_size >= mbuf_size);
	RTE_ASSERT(buf_len <= UINT16_MAX);

	memset(m, 0, mbuf_size);
	/* start of buffer is after mbuf structure and priv data */
	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	rte_mbuf_iova_set(m, rte_mempool_virt2iova(m) + mbuf_size);
	m->buf_len = (uint16_t)buf_len;

	/* keep some headroom between start of buffer and data */
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = RTE_MBUF_PORT_INVALID;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}
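
/*
 * Resulting layout of each pool element after initialization (sketch
 * derived from the code above):
 *
 *	+-----------------+-----------+----------+--------------------+
 *	| struct rte_mbuf | priv data | headroom | data room          |
 *	+-----------------+-----------+----------+--------------------+
 *	^ m                           ^ buf_addr ^ buf_addr + data_off
 */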

/*
 * @internal The callback routine called when the reference counter in the
 * shinfo of an mbuf with a pinned external buffer reaches zero. It means
 * there are no more references to the buffer backing the mbuf, and the
 * mbuf should be freed. The routine is also invoked when a regular mbuf
 * (one not carrying a pinned external or indirect buffer itself) detaches
 * from an mbuf with a pinned external buffer.
 */
static void
rte_pktmbuf_free_pinned_extmem(void *addr, void *opaque)
{
	struct rte_mbuf *m = opaque;

	RTE_SET_USED(addr);
	RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
	RTE_ASSERT(RTE_MBUF_HAS_PINNED_EXTBUF(m));
	RTE_ASSERT(m->shinfo->fcb_opaque == m);

	rte_mbuf_ext_refcnt_set(m->shinfo, 1);
	m->ol_flags = RTE_MBUF_F_EXTERNAL;
	if (m->next != NULL)
		m->next = NULL;
	if (m->nb_segs != 1)
		m->nb_segs = 1;
	rte_mbuf_raw_free(m);
}

/** The context to initialize the mbufs with pinned external buffers. */
struct rte_pktmbuf_extmem_init_ctx {
	const struct rte_pktmbuf_extmem *ext_mem; /* descriptor array. */
	unsigned int ext_num; /* number of descriptors in array. */
	unsigned int ext; /* loop descriptor index. */
	size_t off; /* loop buffer offset. */
};

/**
 * @internal Packet mbuf constructor for pools with pinned external memory.
 *
 * This function initializes some fields in the mbuf structure that are
 * not modified by the user once created (origin pool, buffer start
 * address, and so on). This function is given as a callback function to
 * rte_mempool_obj_iter() called from rte_pktmbuf_pool_create_extbuf().
 *
 * @param mp
 *   The mempool from which mbufs originate.
 * @param opaque_arg
 *   A pointer to the rte_pktmbuf_extmem_init_ctx initialization
 *   context structure.
 * @param m
 *   The mbuf to initialize.
 * @param i
 *   The index of the mbuf in the pool table.
 */
static void
__rte_pktmbuf_init_extmem(struct rte_mempool *mp,
			  void *opaque_arg,
			  void *_m,
			  __rte_unused unsigned int i)
{
	struct rte_mbuf *m = _m;
	struct rte_pktmbuf_extmem_init_ctx *ctx = opaque_arg;
	const struct rte_pktmbuf_extmem *ext_mem;
	uint32_t mbuf_size, buf_len, priv_size;
	struct rte_mbuf_ext_shared_info *shinfo;

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;
	buf_len = rte_pktmbuf_data_room_size(mp);

	RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
	RTE_ASSERT(mp->elt_size >= mbuf_size);
	RTE_ASSERT(buf_len <= UINT16_MAX);

	memset(m, 0, mbuf_size);
	m->priv_size = priv_size;
	m->buf_len = (uint16_t)buf_len;

	/* set the data buffer pointers to external memory */
	ext_mem = ctx->ext_mem + ctx->ext;

	RTE_ASSERT(ctx->ext < ctx->ext_num);
	RTE_ASSERT(ctx->off + ext_mem->elt_size <= ext_mem->buf_len);

	m->buf_addr = RTE_PTR_ADD(ext_mem->buf_ptr, ctx->off);
	rte_mbuf_iova_set(m, ext_mem->buf_iova == RTE_BAD_IOVA ? RTE_BAD_IOVA :
								 (ext_mem->buf_iova + ctx->off));

	ctx->off += ext_mem->elt_size;
	if (ctx->off + ext_mem->elt_size > ext_mem->buf_len) {
		ctx->off = 0;
		++ctx->ext;
	}
	/* keep some headroom between start of buffer and data */
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = RTE_MBUF_PORT_INVALID;
	m->ol_flags = RTE_MBUF_F_EXTERNAL;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;

	/* init external buffer shared info items */
	shinfo = RTE_PTR_ADD(m, mbuf_size);
	m->shinfo = shinfo;
	shinfo->free_cb = rte_pktmbuf_free_pinned_extmem;
	shinfo->fcb_opaque = m;
	rte_mbuf_ext_refcnt_set(shinfo, 1);
}

/* Helper to create a mbuf pool with given mempool ops name */
struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name)
{
	struct rte_mempool *mp;
	struct rte_pktmbuf_pool_private mbp_priv;
	const char *mp_ops_name = ops_name;
	unsigned elt_size;
	int ret;

	if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) {
		MBUF_LOG(ERR, "mbuf priv_size=%u is not aligned",
			priv_size);
		rte_errno = EINVAL;
		return NULL;
	}
	elt_size = sizeof(struct rte_mbuf) + (unsigned)priv_size +
		(unsigned)data_room_size;
	memset(&mbp_priv, 0, sizeof(mbp_priv));
	mbp_priv.mbuf_data_room_size = data_room_size;
	mbp_priv.mbuf_priv_size = priv_size;

	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
		 sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
	if (mp == NULL)
		return NULL;

	if (mp_ops_name == NULL)
		mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		MBUF_LOG(ERR, "error setting mempool handler");
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}
	rte_pktmbuf_pool_init(mp, &mbp_priv);

	ret = rte_mempool_populate_default(mp);
	if (ret < 0) {
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}

	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);

	return mp;
}
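
/*
 * Illustrative usage sketch (the pool name and sizes are hypothetical):
 *
 * @code
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create_by_ops("mbuf_pool", 8192, 256, 0,
 *		RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id(), "ring_mp_mc");
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mbuf pool: %s\n",
 *			rte_strerror(rte_errno));
 * @endcode
 */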

/* Helper to create a mbuf pool */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id)
{
	return rte_pktmbuf_pool_create_by_ops(name, n, cache_size, priv_size,
			data_room_size, socket_id, NULL);
}

/* Helper to create a mbuf pool with pinned external data buffers. */
struct rte_mempool *
rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size,
	uint16_t data_room_size, int socket_id,
	const struct rte_pktmbuf_extmem *ext_mem,
	unsigned int ext_num)
{
	struct rte_mempool *mp;
	struct rte_pktmbuf_pool_private mbp_priv;
	struct rte_pktmbuf_extmem_init_ctx init_ctx;
	const char *mp_ops_name;
	unsigned int elt_size;
	unsigned int i, n_elts = 0;
	int ret;

	if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) {
		MBUF_LOG(ERR, "mbuf priv_size=%u is not aligned",
			priv_size);
		rte_errno = EINVAL;
		return NULL;
	}
	/* Check the external memory descriptors. */
	for (i = 0; i < ext_num; i++) {
		const struct rte_pktmbuf_extmem *extm = ext_mem + i;

		if (!extm->elt_size || !extm->buf_len || !extm->buf_ptr) {
			MBUF_LOG(ERR, "invalid extmem descriptor");
			rte_errno = EINVAL;
			return NULL;
		}
		if (data_room_size > extm->elt_size) {
			MBUF_LOG(ERR, "ext elt_size=%u is too small",
				extm->elt_size);
			rte_errno = EINVAL;
			return NULL;
		}
		n_elts += extm->buf_len / extm->elt_size;
	}
	/* Check whether enough external memory is provided. */
	if (n_elts < n) {
		MBUF_LOG(ERR, "not enough extmem");
		rte_errno = ENOMEM;
		return NULL;
	}
	elt_size = sizeof(struct rte_mbuf) +
		   (unsigned int)priv_size +
		   sizeof(struct rte_mbuf_ext_shared_info);

	memset(&mbp_priv, 0, sizeof(mbp_priv));
	mbp_priv.mbuf_data_room_size = data_room_size;
	mbp_priv.mbuf_priv_size = priv_size;
	mbp_priv.flags = RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;

	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
		 sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
	if (mp == NULL)
		return NULL;

	mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		MBUF_LOG(ERR, "error setting mempool handler");
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}
	rte_pktmbuf_pool_init(mp, &mbp_priv);

	ret = rte_mempool_populate_default(mp);
	if (ret < 0) {
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}

	init_ctx = (struct rte_pktmbuf_extmem_init_ctx){
		.ext_mem = ext_mem,
		.ext_num = ext_num,
		.ext = 0,
		.off = 0,
	};
	rte_mempool_obj_iter(mp, __rte_pktmbuf_init_extmem, &init_ctx);

	return mp;
}
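
/*
 * Illustrative sketch of providing one external memory area; ext_va,
 * ext_iova and ext_len are hypothetical values coming from the
 * application's own allocation and DMA mapping. Note that
 * data_room_size must not exceed elt_size, as checked above.
 *
 * @code
 *	struct rte_pktmbuf_extmem ext = {
 *		.buf_ptr = ext_va,
 *		.buf_iova = ext_iova,
 *		.buf_len = ext_len,
 *		.elt_size = 2048,
 *	};
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create_extbuf("ext_pool", 4096, 256, 0,
 *		2048, rte_socket_id(), &ext, 1);
 * @endcode
 */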

/* do some sanity checks on a mbuf: panic if it fails */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
{
	const char *reason;

	if (rte_mbuf_check(m, is_header, &reason))
		rte_panic("%s\n", reason);
}

int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
		   const char **reason)
{
	unsigned int nb_segs, pkt_len;

	if (m == NULL) {
		*reason = "mbuf is NULL";
		return -1;
	}

	/* generic checks */
	if (m->pool == NULL) {
		*reason = "bad mbuf pool";
		return -1;
	}
	if (RTE_IOVA_IN_MBUF && rte_mbuf_iova_get(m) == 0) {
		*reason = "bad IO addr";
		return -1;
	}
	if (m->buf_addr == NULL) {
		*reason = "bad virt addr";
		return -1;
	}

	uint16_t cnt = rte_mbuf_refcnt_read(m);
	if ((cnt == 0) || (cnt == UINT16_MAX)) {
		*reason = "bad ref cnt";
		return -1;
	}

	/* nothing to check for sub-segments */
	if (is_header == 0)
		return 0;

	/* data_len must not exceed pkt_len */
	if (m->data_len > m->pkt_len) {
		*reason = "bad data_len";
		return -1;
	}

	nb_segs = m->nb_segs;
	pkt_len = m->pkt_len;

	do {
		if (m->data_off > m->buf_len) {
			*reason = "data offset too big in mbuf segment";
			return -1;
		}
		if (m->data_off + m->data_len > m->buf_len) {
			*reason = "data length too big in mbuf segment";
			return -1;
		}
		nb_segs -= 1;
		pkt_len -= m->data_len;
	} while ((m = m->next) != NULL);

	if (nb_segs) {
		*reason = "bad nb_segs";
		return -1;
	}
	if (pkt_len) {
		*reason = "bad pkt_len";
		return -1;
	}

	return 0;
}
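
/*
 * Illustrative usage sketch:
 *
 * @code
 *	const char *reason;
 *
 *	if (rte_mbuf_check(m, 1, &reason) != 0)
 *		printf("invalid mbuf: %s\n", reason);
 * @endcode
 */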

/**
 * @internal helper function for freeing a bulk of packet mbuf segments
 * via an array holding the packet mbuf segments from the same mempool
 * pending to be freed.
 *
 * @param m
 *  The packet mbuf segment to be freed.
 * @param pending
 *  Pointer to the array of packet mbuf segments pending to be freed.
 * @param nb_pending
 *  Pointer to the number of elements held in the array.
 * @param pending_sz
 *  Number of elements the array can hold.
 *  Note: The compiler should optimize this parameter away when using a
 *  constant value, such as RTE_PKTMBUF_FREE_PENDING_SZ.
 */
static void
__rte_pktmbuf_free_seg_via_array(struct rte_mbuf *m,
	struct rte_mbuf ** const pending, unsigned int * const nb_pending,
	const unsigned int pending_sz)
{
	m = rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL)) {
		if (*nb_pending == pending_sz ||
		    (*nb_pending > 0 && m->pool != pending[0]->pool)) {
			rte_mempool_put_bulk(pending[0]->pool,
					(void **)pending, *nb_pending);
			*nb_pending = 0;
		}

		pending[(*nb_pending)++] = m;
	}
}

/**
 * Size of the array holding mbufs from the same mempool pending to be freed
 * in bulk.
 */
#define RTE_PKTMBUF_FREE_PENDING_SZ 64

/* Free a bulk of packet mbufs back into their original mempools. */
void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count)
{
	struct rte_mbuf *m, *m_next, *pending[RTE_PKTMBUF_FREE_PENDING_SZ];
	unsigned int idx, nb_pending = 0;

	for (idx = 0; idx < count; idx++) {
		m = mbufs[idx];
		if (unlikely(m == NULL))
			continue;

		__rte_mbuf_sanity_check(m, 1);

		do {
			m_next = m->next;
			__rte_pktmbuf_free_seg_via_array(m,
					pending, &nb_pending,
					RTE_PKTMBUF_FREE_PENDING_SZ);
			m = m_next;
		} while (m != NULL);
	}

	if (nb_pending > 0)
		rte_mempool_put_bulk(pending[0]->pool, (void **)pending, nb_pending);
}
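
/*
 * Illustrative usage sketch: freeing a received burst in a single call
 * (port_id and the burst size are hypothetical; rte_eth_rx_burst()
 * comes from rte_ethdev.h, which this file does not include).
 *
 * @code
 *	struct rte_mbuf *burst[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, 0, burst, 32);
 *
 *	rte_pktmbuf_free_bulk(burst, nb);
 * @endcode
 */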

/* Creates a shallow copy of mbuf */
struct rte_mbuf *
rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
{
	struct rte_mbuf *mc, *mi, **prev;
	uint32_t pktlen;
	uint16_t nseg;

	mc = rte_pktmbuf_alloc(mp);
	if (unlikely(mc == NULL))
		return NULL;

	mi = mc;
	prev = &mi->next;
	pktlen = md->pkt_len;
	nseg = 0;

	do {
		nseg++;
		rte_pktmbuf_attach(mi, md);
		*prev = mi;
		prev = &mi->next;
	} while ((md = md->next) != NULL &&
	    (mi = rte_pktmbuf_alloc(mp)) != NULL);

	*prev = NULL;
	mc->nb_segs = nseg;
	mc->pkt_len = pktlen;

	/* Allocation of new indirect segment failed */
	if (unlikely(mi == NULL)) {
		rte_pktmbuf_free(mc);
		return NULL;
	}

	__rte_mbuf_sanity_check(mc, 1);
	return mc;
}
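
/*
 * Illustrative usage sketch: every segment of the clone is attached to
 * (shares the data of) the corresponding segment of the original, so
 * freeing the clone only decrements reference counts.
 *
 * @code
 *	struct rte_mbuf *clone = rte_pktmbuf_clone(pkt, pool);
 *
 *	if (clone != NULL)
 *		rte_pktmbuf_free(clone);
 * @endcode
 */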

/* convert multi-segment mbuf to single mbuf */
int
__rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	size_t seg_len, copy_len;
	struct rte_mbuf *m;
	struct rte_mbuf *m_next;
	char *buffer;

	/* Extend first segment to the total packet length */
	copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);

	if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
		return -1;

	buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
	mbuf->data_len = (uint16_t)(mbuf->pkt_len);

	/* Append data from next segments to the first one */
	m = mbuf->next;
	while (m != NULL) {
		m_next = m->next;

		seg_len = rte_pktmbuf_data_len(m);
		rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
		buffer += seg_len;

		rte_pktmbuf_free_seg(m);
		m = m_next;
	}

	mbuf->next = NULL;
	mbuf->nb_segs = 1;

	return 0;
}
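
/*
 * Illustrative usage sketch: applications normally call the inline
 * wrapper rte_pktmbuf_linearize(), which invokes the function above
 * only for multi-segment mbufs. Failure means the first segment lacks
 * the tailroom to hold the whole packet.
 *
 * @code
 *	if (rte_pktmbuf_linearize(pkt) != 0)
 *		printf("cannot linearize: not enough tailroom\n");
 * @endcode
 */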

/* Create a deep copy of mbuf */
struct rte_mbuf *
rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
		 uint32_t off, uint32_t len)
{
	const struct rte_mbuf *seg = m;
	struct rte_mbuf *mc, *m_last, **prev;

	/* garbage in check */
	__rte_mbuf_sanity_check(m, 1);

	/* check for request to copy at offset past end of mbuf */
	if (unlikely(off >= m->pkt_len))
		return NULL;

	mc = rte_pktmbuf_alloc(mp);
	if (unlikely(mc == NULL))
		return NULL;

	/* truncate requested length to available data */
	if (len > m->pkt_len - off)
		len = m->pkt_len - off;

	__rte_pktmbuf_copy_hdr(mc, m);

	/* copied mbuf is not indirect or external */
	mc->ol_flags = m->ol_flags & ~(RTE_MBUF_F_INDIRECT|RTE_MBUF_F_EXTERNAL);

	prev = &mc->next;
	m_last = mc;
	while (len > 0) {
		uint32_t copy_len;

		/* skip leading mbuf segments */
		while (off >= seg->data_len) {
			off -= seg->data_len;
			seg = seg->next;
		}

		/* current buffer is full, chain a new one */
		if (rte_pktmbuf_tailroom(m_last) == 0) {
			m_last = rte_pktmbuf_alloc(mp);
			if (unlikely(m_last == NULL)) {
				rte_pktmbuf_free(mc);
				return NULL;
			}
			++mc->nb_segs;
			*prev = m_last;
			prev = &m_last->next;
		}

		/*
		 * copy the min of data in input segment (seg)
		 * vs space available in output (m_last)
		 */
		copy_len = RTE_MIN(seg->data_len - off, len);
		if (copy_len > rte_pktmbuf_tailroom(m_last))
			copy_len = rte_pktmbuf_tailroom(m_last);

		/* append from seg to m_last */
		rte_memcpy(rte_pktmbuf_mtod_offset(m_last, char *,
						   m_last->data_len),
			   rte_pktmbuf_mtod_offset(seg, char *, off),
			   copy_len);

		/* update offsets and lengths */
		m_last->data_len += copy_len;
		mc->pkt_len += copy_len;
		off += copy_len;
		len -= copy_len;
	}

	/* garbage out check */
	__rte_mbuf_sanity_check(mc, 1);
	return mc;
}
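
/*
 * Illustrative usage sketch: a full deep copy can be requested with
 * off == 0 and len == UINT32_MAX, since len is truncated above to the
 * data actually available.
 *
 * @code
 *	struct rte_mbuf *dup = rte_pktmbuf_copy(pkt, pool, 0, UINT32_MAX);
 * @endcode
 */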

/* dump a mbuf on console */
void
rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
{
	unsigned int len;
	unsigned int nb_segs;

	__rte_mbuf_sanity_check(m, 1);

	fprintf(f, "dump mbuf at %p, iova=%#" PRIx64 ", buf_len=%u\n", m, rte_mbuf_iova_get(m),
		m->buf_len);
	fprintf(f, "  pkt_len=%u, ol_flags=%#"PRIx64", nb_segs=%u, port=%u",
		m->pkt_len, m->ol_flags, m->nb_segs, m->port);

	if (m->ol_flags & (RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_TX_QINQ))
		fprintf(f, ", vlan_tci_outer=%u", m->vlan_tci_outer);

	if (m->ol_flags & (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_TX_VLAN))
		fprintf(f, ", vlan_tci=%u", m->vlan_tci);

	fprintf(f, ", ptype=%#"PRIx32"\n", m->packet_type);

	nb_segs = m->nb_segs;

	while (m && nb_segs != 0) {
		__rte_mbuf_sanity_check(m, 0);

		fprintf(f, "  segment at %p, data=%p, len=%u, off=%u, refcnt=%u\n",
			m, rte_pktmbuf_mtod(m, void *),
			m->data_len, m->data_off, rte_mbuf_refcnt_read(m));

		len = dump_len;
		if (len > m->data_len)
			len = m->data_len;
		if (len != 0)
			rte_hexdump(f, NULL, rte_pktmbuf_mtod(m, void *), len);
		dump_len -= len;
		m = m->next;
		nb_segs--;
	}
}
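
/*
 * Illustrative usage sketch: dump the metadata and the first 64 data
 * bytes of a packet to stdout (dump_len counts bytes across all
 * segments, as the loop above decrements it per segment).
 *
 * @code
 *	rte_pktmbuf_dump(stdout, pkt, 64);
 * @endcode
 */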

/* read len data bytes in a mbuf at specified offset (internal) */
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf)
{
	const struct rte_mbuf *seg = m;
	uint32_t buf_off = 0, copy_len;

	if (off + len > rte_pktmbuf_pkt_len(m))
		return NULL;

	while (off >= rte_pktmbuf_data_len(seg)) {
		off -= rte_pktmbuf_data_len(seg);
		seg = seg->next;
	}

	if (off + len <= rte_pktmbuf_data_len(seg))
		return rte_pktmbuf_mtod_offset(seg, char *, off);

	/* rare case: header is split among several segments */
	while (len > 0) {
		copy_len = rte_pktmbuf_data_len(seg) - off;
		if (copy_len > len)
			copy_len = len;
		rte_memcpy((char *)buf + buf_off,
			rte_pktmbuf_mtod_offset(seg, char *, off), copy_len);
		off = 0;
		buf_off += copy_len;
		len -= copy_len;
		seg = seg->next;
	}

	return buf;
}
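
/*
 * Illustrative usage sketch: applications call the inline wrapper
 * rte_pktmbuf_read(), which returns a pointer into the mbuf when the
 * requested area is contiguous and otherwise copies it into the
 * supplied buffer (hdr_copy below; rte_ether_hdr is from rte_ether.h).
 *
 * @code
 *	struct rte_ether_hdr hdr_copy;
 *	const struct rte_ether_hdr *eh;
 *
 *	eh = rte_pktmbuf_read(pkt, 0, sizeof(*eh), &hdr_copy);
 *	if (eh == NULL)
 *		return;
 * @endcode
 */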

/*
 * Get the name of a RX offload flag. Must be kept synchronized with flag
 * definitions in rte_mbuf.h.
 */
const char *rte_get_rx_ol_flag_name(uint64_t mask)
{
	switch (mask) {
	case RTE_MBUF_F_RX_VLAN: return "RTE_MBUF_F_RX_VLAN";
	case RTE_MBUF_F_RX_RSS_HASH: return "RTE_MBUF_F_RX_RSS_HASH";
	case RTE_MBUF_F_RX_FDIR: return "RTE_MBUF_F_RX_FDIR";
	case RTE_MBUF_F_RX_L4_CKSUM_BAD: return "RTE_MBUF_F_RX_L4_CKSUM_BAD";
	case RTE_MBUF_F_RX_L4_CKSUM_GOOD: return "RTE_MBUF_F_RX_L4_CKSUM_GOOD";
	case RTE_MBUF_F_RX_L4_CKSUM_NONE: return "RTE_MBUF_F_RX_L4_CKSUM_NONE";
	case RTE_MBUF_F_RX_IP_CKSUM_BAD: return "RTE_MBUF_F_RX_IP_CKSUM_BAD";
	case RTE_MBUF_F_RX_IP_CKSUM_GOOD: return "RTE_MBUF_F_RX_IP_CKSUM_GOOD";
	case RTE_MBUF_F_RX_IP_CKSUM_NONE: return "RTE_MBUF_F_RX_IP_CKSUM_NONE";
	case RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD: return "RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD";
	case RTE_MBUF_F_RX_VLAN_STRIPPED: return "RTE_MBUF_F_RX_VLAN_STRIPPED";
	case RTE_MBUF_F_RX_IEEE1588_PTP: return "RTE_MBUF_F_RX_IEEE1588_PTP";
	case RTE_MBUF_F_RX_IEEE1588_TMST: return "RTE_MBUF_F_RX_IEEE1588_TMST";
	case RTE_MBUF_F_RX_FDIR_ID: return "RTE_MBUF_F_RX_FDIR_ID";
	case RTE_MBUF_F_RX_FDIR_FLX: return "RTE_MBUF_F_RX_FDIR_FLX";
	case RTE_MBUF_F_RX_QINQ_STRIPPED: return "RTE_MBUF_F_RX_QINQ_STRIPPED";
	case RTE_MBUF_F_RX_QINQ: return "RTE_MBUF_F_RX_QINQ";
	case RTE_MBUF_F_RX_LRO: return "RTE_MBUF_F_RX_LRO";
	case RTE_MBUF_F_RX_SEC_OFFLOAD: return "RTE_MBUF_F_RX_SEC_OFFLOAD";
	case RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED: return "RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED";
	case RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD: return "RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD";
	case RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD: return "RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD";
	case RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID:
		return "RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID";

	default: return NULL;
	}
}

struct flag_mask {
	uint64_t flag;
	uint64_t mask;
	const char *default_name;
};

/* write the list of rx ol flags in buffer buf */
int
rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{
	const struct flag_mask rx_flags[] = {
		{ RTE_MBUF_F_RX_VLAN, RTE_MBUF_F_RX_VLAN, NULL },
		{ RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, NULL },
		{ RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR, NULL },
		{ RTE_MBUF_F_RX_L4_CKSUM_BAD, RTE_MBUF_F_RX_L4_CKSUM_MASK, NULL },
		{ RTE_MBUF_F_RX_L4_CKSUM_GOOD, RTE_MBUF_F_RX_L4_CKSUM_MASK, NULL },
		{ RTE_MBUF_F_RX_L4_CKSUM_NONE, RTE_MBUF_F_RX_L4_CKSUM_MASK, NULL },
		{ RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN, RTE_MBUF_F_RX_L4_CKSUM_MASK,
		  "RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN" },
		{ RTE_MBUF_F_RX_IP_CKSUM_BAD, RTE_MBUF_F_RX_IP_CKSUM_MASK, NULL },
		{ RTE_MBUF_F_RX_IP_CKSUM_GOOD, RTE_MBUF_F_RX_IP_CKSUM_MASK, NULL },
		{ RTE_MBUF_F_RX_IP_CKSUM_NONE, RTE_MBUF_F_RX_IP_CKSUM_MASK, NULL },
		{ RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN, RTE_MBUF_F_RX_IP_CKSUM_MASK,
		  "RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN" },
		{ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD, RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD, NULL },
		{ RTE_MBUF_F_RX_VLAN_STRIPPED, RTE_MBUF_F_RX_VLAN_STRIPPED, NULL },
		{ RTE_MBUF_F_RX_IEEE1588_PTP, RTE_MBUF_F_RX_IEEE1588_PTP, NULL },
		{ RTE_MBUF_F_RX_IEEE1588_TMST, RTE_MBUF_F_RX_IEEE1588_TMST, NULL },
		{ RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID, NULL },
		{ RTE_MBUF_F_RX_FDIR_FLX, RTE_MBUF_F_RX_FDIR_FLX, NULL },
		{ RTE_MBUF_F_RX_QINQ_STRIPPED, RTE_MBUF_F_RX_QINQ_STRIPPED, NULL },
		{ RTE_MBUF_F_RX_LRO, RTE_MBUF_F_RX_LRO, NULL },
		{ RTE_MBUF_F_RX_SEC_OFFLOAD, RTE_MBUF_F_RX_SEC_OFFLOAD, NULL },
		{ RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED, RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED, NULL },
		{ RTE_MBUF_F_RX_QINQ, RTE_MBUF_F_RX_QINQ, NULL },
		{ RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD, RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK, NULL },
		{ RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD, RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK,
		  NULL },
		{ RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID, RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK,
		  NULL },
		{ RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN, RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK,
		  "RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN" },
	};
	const char *name;
	unsigned int i;
	int ret;

	if (buflen == 0)
		return -1;

	buf[0] = '\0';
	for (i = 0; i < RTE_DIM(rx_flags); i++) {
		if ((mask & rx_flags[i].mask) != rx_flags[i].flag)
			continue;
		name = rte_get_rx_ol_flag_name(rx_flags[i].flag);
		if (name == NULL)
			name = rx_flags[i].default_name;
		ret = snprintf(buf, buflen, "%s ", name);
		if (ret < 0)
			return -1;
		if ((size_t)ret >= buflen)
			return -1;
		buf += ret;
		buflen -= ret;
	}

	return 0;
}
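
/*
 * Illustrative usage sketch (the buffer size is arbitrary):
 *
 * @code
 *	char flags[256];
 *
 *	if (rte_get_rx_ol_flag_list(pkt->ol_flags, flags, sizeof(flags)) == 0)
 *		printf("rx flags: %s\n", flags);
 * @endcode
 */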

/*
 * Get the name of a TX offload flag. Must be kept synchronized with flag
 * definitions in rte_mbuf.h.
 */
const char *rte_get_tx_ol_flag_name(uint64_t mask)
{
	switch (mask) {
	case RTE_MBUF_F_TX_VLAN: return "RTE_MBUF_F_TX_VLAN";
	case RTE_MBUF_F_TX_IP_CKSUM: return "RTE_MBUF_F_TX_IP_CKSUM";
	case RTE_MBUF_F_TX_TCP_CKSUM: return "RTE_MBUF_F_TX_TCP_CKSUM";
	case RTE_MBUF_F_TX_SCTP_CKSUM: return "RTE_MBUF_F_TX_SCTP_CKSUM";
	case RTE_MBUF_F_TX_UDP_CKSUM: return "RTE_MBUF_F_TX_UDP_CKSUM";
	case RTE_MBUF_F_TX_IEEE1588_TMST: return "RTE_MBUF_F_TX_IEEE1588_TMST";
	case RTE_MBUF_F_TX_TCP_SEG: return "RTE_MBUF_F_TX_TCP_SEG";
	case RTE_MBUF_F_TX_IPV4: return "RTE_MBUF_F_TX_IPV4";
	case RTE_MBUF_F_TX_IPV6: return "RTE_MBUF_F_TX_IPV6";
	case RTE_MBUF_F_TX_OUTER_IP_CKSUM: return "RTE_MBUF_F_TX_OUTER_IP_CKSUM";
	case RTE_MBUF_F_TX_OUTER_IPV4: return "RTE_MBUF_F_TX_OUTER_IPV4";
	case RTE_MBUF_F_TX_OUTER_IPV6: return "RTE_MBUF_F_TX_OUTER_IPV6";
	case RTE_MBUF_F_TX_TUNNEL_VXLAN: return "RTE_MBUF_F_TX_TUNNEL_VXLAN";
	case RTE_MBUF_F_TX_TUNNEL_GTP: return "RTE_MBUF_F_TX_TUNNEL_GTP";
	case RTE_MBUF_F_TX_TUNNEL_GRE: return "RTE_MBUF_F_TX_TUNNEL_GRE";
	case RTE_MBUF_F_TX_TUNNEL_IPIP: return "RTE_MBUF_F_TX_TUNNEL_IPIP";
	case RTE_MBUF_F_TX_TUNNEL_GENEVE: return "RTE_MBUF_F_TX_TUNNEL_GENEVE";
	case RTE_MBUF_F_TX_TUNNEL_MPLSINUDP: return "RTE_MBUF_F_TX_TUNNEL_MPLSINUDP";
	case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE: return "RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE";
	case RTE_MBUF_F_TX_TUNNEL_IP: return "RTE_MBUF_F_TX_TUNNEL_IP";
	case RTE_MBUF_F_TX_TUNNEL_UDP: return "RTE_MBUF_F_TX_TUNNEL_UDP";
	case RTE_MBUF_F_TX_QINQ: return "RTE_MBUF_F_TX_QINQ";
	case RTE_MBUF_F_TX_MACSEC: return "RTE_MBUF_F_TX_MACSEC";
	case RTE_MBUF_F_TX_SEC_OFFLOAD: return "RTE_MBUF_F_TX_SEC_OFFLOAD";
	case RTE_MBUF_F_TX_UDP_SEG: return "RTE_MBUF_F_TX_UDP_SEG";
	case RTE_MBUF_F_TX_OUTER_UDP_CKSUM: return "RTE_MBUF_F_TX_OUTER_UDP_CKSUM";
	default: return NULL;
	}
}

/* write the list of tx ol flags in buffer buf */
int
rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{
	const struct flag_mask tx_flags[] = {
		{ RTE_MBUF_F_TX_VLAN, RTE_MBUF_F_TX_VLAN, NULL },
		{ RTE_MBUF_F_TX_IP_CKSUM, RTE_MBUF_F_TX_IP_CKSUM, NULL },
		{ RTE_MBUF_F_TX_TCP_CKSUM, RTE_MBUF_F_TX_L4_MASK, NULL },
		{ RTE_MBUF_F_TX_SCTP_CKSUM, RTE_MBUF_F_TX_L4_MASK, NULL },
		{ RTE_MBUF_F_TX_UDP_CKSUM, RTE_MBUF_F_TX_L4_MASK, NULL },
		{ RTE_MBUF_F_TX_L4_NO_CKSUM, RTE_MBUF_F_TX_L4_MASK, "RTE_MBUF_F_TX_L4_NO_CKSUM" },
		{ RTE_MBUF_F_TX_IEEE1588_TMST, RTE_MBUF_F_TX_IEEE1588_TMST, NULL },
		{ RTE_MBUF_F_TX_TCP_SEG, RTE_MBUF_F_TX_TCP_SEG, NULL },
		{ RTE_MBUF_F_TX_IPV4, RTE_MBUF_F_TX_IPV4, NULL },
		{ RTE_MBUF_F_TX_IPV6, RTE_MBUF_F_TX_IPV6, NULL },
		{ RTE_MBUF_F_TX_OUTER_IP_CKSUM, RTE_MBUF_F_TX_OUTER_IP_CKSUM, NULL },
		{ RTE_MBUF_F_TX_OUTER_IPV4, RTE_MBUF_F_TX_OUTER_IPV4, NULL },
		{ RTE_MBUF_F_TX_OUTER_IPV6, RTE_MBUF_F_TX_OUTER_IPV6, NULL },
		{ RTE_MBUF_F_TX_TUNNEL_VXLAN, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
		{ RTE_MBUF_F_TX_TUNNEL_GTP, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
		{ RTE_MBUF_F_TX_TUNNEL_GRE, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
		{ RTE_MBUF_F_TX_TUNNEL_IPIP, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
		{ RTE_MBUF_F_TX_TUNNEL_GENEVE, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
		{ RTE_MBUF_F_TX_TUNNEL_MPLSINUDP, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
		{ RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
		{ RTE_MBUF_F_TX_TUNNEL_IP, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
		{ RTE_MBUF_F_TX_TUNNEL_UDP, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
		{ RTE_MBUF_F_TX_QINQ, RTE_MBUF_F_TX_QINQ, NULL },
		{ RTE_MBUF_F_TX_MACSEC, RTE_MBUF_F_TX_MACSEC, NULL },
		{ RTE_MBUF_F_TX_SEC_OFFLOAD, RTE_MBUF_F_TX_SEC_OFFLOAD, NULL },
		{ RTE_MBUF_F_TX_UDP_SEG, RTE_MBUF_F_TX_UDP_SEG, NULL },
		{ RTE_MBUF_F_TX_OUTER_UDP_CKSUM, RTE_MBUF_F_TX_OUTER_UDP_CKSUM, NULL },
	};
	const char *name;
	unsigned int i;
	int ret;

	if (buflen == 0)
		return -1;

	buf[0] = '\0';
	for (i = 0; i < RTE_DIM(tx_flags); i++) {
		if ((mask & tx_flags[i].mask) != tx_flags[i].flag)
			continue;
		name = rte_get_tx_ol_flag_name(tx_flags[i].flag);
		if (name == NULL)
			name = tx_flags[i].default_name;
		ret = snprintf(buf, buflen, "%s ", name);
		if (ret < 0)
			return -1;
		if ((size_t)ret >= buflen)
			return -1;
		buf += ret;
		buflen -= ret;
	}

	return 0;
}
943