/*-
 * Copyright (c) 2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include "miibus_if.h"

#include <contrib/ncsw/inc/integrations/dpaa_integration_ext.h>
#include <contrib/ncsw/inc/Peripherals/fm_ext.h>
#include <contrib/ncsw/inc/Peripherals/fm_mac_ext.h>
#include <contrib/ncsw/inc/Peripherals/fm_port_ext.h>
#include <contrib/ncsw/inc/xx_ext.h>

#include "fman.h"
#include "bman.h"
#include "qman.h"
#include "if_dtsec.h"
#include "if_dtsec_rm.h"


/**
 * @group dTSEC RM private defines.
 * @{
 */
#define	DTSEC_BPOOLS_USED	(1)
#define	DTSEC_MAX_TX_QUEUE_LEN	256

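/*
 * Bookkeeping for a frame handed to the FMan for transmission: the mbuf
 * chain being sent and the scatter-gather table describing its buffers.
 * One of these is allocated per in-flight TX frame and reclaimed in the
 * TX confirmation callback.
 */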
struct dtsec_rm_frame_info {
	struct mbuf *fi_mbuf;
	t_DpaaSGTE fi_sgt[DPAA_NUM_OF_SG_TABLE_ENTRY];
};

enum dtsec_rm_pool_params {
	DTSEC_RM_POOL_RX_LOW_MARK = 16,
	DTSEC_RM_POOL_RX_HIGH_MARK = 64,
	DTSEC_RM_POOL_RX_MAX_SIZE = 256,

	DTSEC_RM_POOL_FI_LOW_MARK = 16,
	DTSEC_RM_POOL_FI_HIGH_MARK = 64,
	DTSEC_RM_POOL_FI_MAX_SIZE = 256,
};

#define	DTSEC_RM_FQR_RX_CHANNEL		e_QM_FQ_CHANNEL_POOL1
#define	DTSEC_RM_FQR_TX_CONF_CHANNEL	e_QM_FQ_CHANNEL_SWPORTAL0
enum dtsec_rm_fqr_params {
	DTSEC_RM_FQR_RX_WQ = 1,
	DTSEC_RM_FQR_TX_WQ = 1,
	DTSEC_RM_FQR_TX_CONF_WQ = 1
};
/** @} */


/**
 * @group dTSEC Frame Info routines.
 * @{
 */
void
dtsec_rm_fi_pool_free(struct dtsec_softc *sc)
{

	if (sc->sc_fi_zone != NULL)
		uma_zdestroy(sc->sc_fi_zone);
}

int
dtsec_rm_fi_pool_init(struct dtsec_softc *sc)
{

	snprintf(sc->sc_fi_zname, sizeof(sc->sc_fi_zname), "%s: Frame Info",
	    device_get_nameunit(sc->sc_dev));

	sc->sc_fi_zone = uma_zcreate(sc->sc_fi_zname,
	    sizeof(struct dtsec_rm_frame_info), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	return (0);
}

static struct dtsec_rm_frame_info *
dtsec_rm_fi_alloc(struct dtsec_softc *sc)
{
	struct dtsec_rm_frame_info *fi;

	fi = uma_zalloc(sc->sc_fi_zone, M_NOWAIT);

	return (fi);
}

static void
dtsec_rm_fi_free(struct dtsec_softc *sc, struct dtsec_rm_frame_info *fi)
{

	uma_zfree(sc->sc_fi_zone, fi);
}
/** @} */


/**
 * @group dTSEC FMan PORT routines.
 * @{
 */
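/*
 * Configure and initialize the FMan RX port: attach the BMan buffer pool
 * used for receive buffers and point both the default and error frame
 * queues at the RX FQID created in dtsec_rm_fqr_rx_init().
 */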
int
dtsec_rm_fm_port_rx_init(struct dtsec_softc *sc, int unit)
{
	t_FmPortParams params;
	t_FmPortRxParams *rx_params;
	t_FmExtPools *pool_params;
	t_Error error;

	memset(&params, 0, sizeof(params));

	params.baseAddr = sc->sc_fm_base + sc->sc_port_rx_hw_id;
	params.h_Fm = sc->sc_fmh;
	params.portType = dtsec_fm_port_rx_type(sc->sc_eth_dev_type);
	params.portId = sc->sc_eth_id;
	params.independentModeEnable = false;
	params.liodnBase = FM_PORT_LIODN_BASE;
	params.f_Exception = dtsec_fm_port_rx_exception_callback;
	params.h_App = sc;

	rx_params = &params.specificParams.rxParams;
	rx_params->errFqid = sc->sc_rx_fqid;
	rx_params->dfltFqid = sc->sc_rx_fqid;
	rx_params->liodnOffset = 0;

	pool_params = &rx_params->extBufPools;
	pool_params->numOfPoolsUsed = DTSEC_BPOOLS_USED;
	pool_params->extBufPool->id = sc->sc_rx_bpid;
	pool_params->extBufPool->size = FM_PORT_BUFFER_SIZE;

	sc->sc_rxph = FM_PORT_Config(&params);
	if (sc->sc_rxph == NULL) {
		device_printf(sc->sc_dev, "couldn't configure FM Port RX.\n");
		return (ENXIO);
	}

	error = FM_PORT_Init(sc->sc_rxph);
	if (error != E_OK) {
		device_printf(sc->sc_dev, "couldn't initialize FM Port RX.\n");
		FM_PORT_Free(sc->sc_rxph);
		return (ENXIO);
	}

	if (bootverbose)
		device_printf(sc->sc_dev, "RX hw port 0x%02x initialized.\n",
		    sc->sc_port_rx_hw_id);

	return (0);
}

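/*
 * Configure and initialize the FMan TX port.  Both the default and error
 * frame queues are set to the TX confirmation FQID, so transmitted and
 * failed frames alike come back through the confirmation callback.
 */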
int
dtsec_rm_fm_port_tx_init(struct dtsec_softc *sc, int unit)
{
	t_FmPortParams params;
	t_FmPortNonRxParams *tx_params;
	t_Error error;

	memset(&params, 0, sizeof(params));

	params.baseAddr = sc->sc_fm_base + sc->sc_port_tx_hw_id;
	params.h_Fm = sc->sc_fmh;
	params.portType = dtsec_fm_port_tx_type(sc->sc_eth_dev_type);
	params.portId = sc->sc_eth_id;
	params.independentModeEnable = false;
	params.liodnBase = FM_PORT_LIODN_BASE;
	params.f_Exception = dtsec_fm_port_tx_exception_callback;
	params.h_App = sc;

	tx_params = &params.specificParams.nonRxParams;
	tx_params->errFqid = sc->sc_tx_conf_fqid;
	tx_params->dfltFqid = sc->sc_tx_conf_fqid;
	tx_params->qmChannel = sc->sc_port_tx_qman_chan;
#ifdef FM_OP_PARTITION_ERRATA_FMANx8
	tx_params->opLiodnOffset = 0;
#endif

	sc->sc_txph = FM_PORT_Config(&params);
	if (sc->sc_txph == NULL) {
		device_printf(sc->sc_dev, "couldn't configure FM Port TX.\n");
		return (ENXIO);
	}

	error = FM_PORT_Init(sc->sc_txph);
	if (error != E_OK) {
		device_printf(sc->sc_dev, "couldn't initialize FM Port TX.\n");
		FM_PORT_Free(sc->sc_txph);
		return (ENXIO);
	}

	if (bootverbose)
		device_printf(sc->sc_dev, "TX hw port 0x%02x initialized.\n",
		    sc->sc_port_tx_hw_id);

	return (0);
}
/** @} */


/**
 * @group dTSEC buffer pools routines.
 * @{
 */
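/*
 * BMan buffer pool callbacks.  The RX pool is backed by the per-interface
 * UMA zone created in dtsec_rm_pool_rx_init(); these routines seed the
 * pool with buffers from the zone and release them back to it.
 */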
static t_Error
dtsec_rm_pool_rx_put_buffer(t_Handle h_BufferPool, uint8_t *buffer,
    t_Handle context)
{
	struct dtsec_softc *sc;

	sc = h_BufferPool;
	uma_zfree(sc->sc_rx_zone, buffer);

	return (E_OK);
}

static uint8_t *
dtsec_rm_pool_rx_get_buffer(t_Handle h_BufferPool, t_Handle *context)
{
	struct dtsec_softc *sc;
	uint8_t *buffer;

	sc = h_BufferPool;
	buffer = uma_zalloc(sc->sc_rx_zone, M_NOWAIT);

	return (buffer);
}

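/*
 * Called when the RX buffer pool crosses its depletion threshold; refill
 * it until it holds more than DTSEC_RM_POOL_RX_HIGH_MARK buffers.
 */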
static void
dtsec_rm_pool_rx_depleted(t_Handle h_App, bool in)
{
	struct dtsec_softc *sc;
	unsigned int count;

	sc = h_App;

	if (!in)
		return;

	while (1) {
		count = bman_count(sc->sc_rx_pool);
		if (count > DTSEC_RM_POOL_RX_HIGH_MARK)
			return;

		bman_pool_fill(sc->sc_rx_pool, DTSEC_RM_POOL_RX_HIGH_MARK);
	}
}

void
dtsec_rm_pool_rx_free(struct dtsec_softc *sc)
{

	if (sc->sc_rx_pool != NULL)
		bman_pool_destroy(sc->sc_rx_pool);

	if (sc->sc_rx_zone != NULL)
		uma_zdestroy(sc->sc_rx_zone);
}

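/*
 * Create the RX buffer UMA zone (buffers aligned to FM_PORT_BUFFER_SIZE)
 * and the BMan pool, with its watermarks and depletion callback, that
 * supplies those buffers to the FMan RX port.
 */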
int
dtsec_rm_pool_rx_init(struct dtsec_softc *sc)
{

	/* FM_PORT_BUFFER_SIZE must be less than PAGE_SIZE */
	CTASSERT(FM_PORT_BUFFER_SIZE < PAGE_SIZE);

	snprintf(sc->sc_rx_zname, sizeof(sc->sc_rx_zname), "%s: RX Buffers",
	    device_get_nameunit(sc->sc_dev));

	sc->sc_rx_zone = uma_zcreate(sc->sc_rx_zname, FM_PORT_BUFFER_SIZE, NULL,
	    NULL, NULL, NULL, FM_PORT_BUFFER_SIZE - 1, 0);

	sc->sc_rx_pool = bman_pool_create(&sc->sc_rx_bpid, FM_PORT_BUFFER_SIZE,
	    0, 0, DTSEC_RM_POOL_RX_MAX_SIZE, dtsec_rm_pool_rx_get_buffer,
	    dtsec_rm_pool_rx_put_buffer, DTSEC_RM_POOL_RX_LOW_MARK,
	    DTSEC_RM_POOL_RX_HIGH_MARK, 0, 0, dtsec_rm_pool_rx_depleted, sc, NULL,
	    NULL);
	if (sc->sc_rx_pool == NULL) {
		device_printf(sc->sc_dev, "could not create RX buffer pool\n");
		dtsec_rm_pool_rx_free(sc);
		return (EIO);
	}

	return (0);
}
/** @} */


/**
 * @group dTSEC Frame Queue Range routines.
 * @{
 */
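/*
 * External-storage free routine for RX mbufs: once the stack is done with
 * a receive buffer, return it to the BMan pool, or back to the UMA zone if
 * the pool is already at its maximum size.
 */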
static void
dtsec_rm_fqr_mext_free(struct mbuf *m)
{
	struct dtsec_softc *sc;
	void *buffer;

	buffer = m->m_ext.ext_arg1;
	sc = m->m_ext.ext_arg2;
	if (bman_count(sc->sc_rx_pool) <= DTSEC_RM_POOL_RX_MAX_SIZE)
		bman_put_buffer(sc->sc_rx_pool, buffer);
	else
		dtsec_rm_pool_rx_put_buffer(sc, buffer, NULL);
}

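/*
 * RX frame queue callback.  The received buffer is wrapped in an mbuf as
 * external storage (no copy) and passed up the stack; on error the buffer
 * is returned to the BMan pool instead.
 */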
static e_RxStoreResponse
dtsec_rm_fqr_rx_callback(t_Handle app, t_Handle fqr, t_Handle portal,
    uint32_t fqid_off, t_DpaaFD *frame)
{
	struct dtsec_softc *sc;
	struct mbuf *m;
	void *frame_va;

	m = NULL;
	sc = app;

	frame_va = DPAA_FD_GET_ADDR(frame);
	KASSERT(DPAA_FD_GET_FORMAT(frame) == e_DPAA_FD_FORMAT_TYPE_SHORT_SBSF,
	    ("%s(): Got unsupported frame format 0x%02X!", __func__,
	    DPAA_FD_GET_FORMAT(frame)));

	KASSERT(DPAA_FD_GET_OFFSET(frame) == 0,
	    ("%s(): Only offset 0 is supported!", __func__));

	if (DPAA_FD_GET_STATUS(frame) != 0) {
		device_printf(sc->sc_dev, "RX error: 0x%08X\n",
		    DPAA_FD_GET_STATUS(frame));
		goto err;
	}

	m = m_gethdr(M_NOWAIT, MT_HEADER);
	if (m == NULL)
		goto err;

	m_extadd(m, frame_va, FM_PORT_BUFFER_SIZE,
	    dtsec_rm_fqr_mext_free, frame_va, sc, 0,
	    EXT_NET_DRV);

	m->m_pkthdr.rcvif = sc->sc_ifnet;
	m->m_len = DPAA_FD_GET_LENGTH(frame);
	m_fixhdr(m);

	if_input(sc->sc_ifnet, m);

	return (e_RX_STORE_RESPONSE_CONTINUE);

err:
	bman_put_buffer(sc->sc_rx_pool, frame_va);
	if (m != NULL)
		m_freem(m);

	return (e_RX_STORE_RESPONSE_CONTINUE);
}

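/*
 * TX confirmation callback.  Recover the frame info pointer stashed in the
 * first scatter-gather entry, free the transmitted mbuf chain and frame
 * info, and restart transmission if the TX queue had previously filled up.
 */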
static e_RxStoreResponse
dtsec_rm_fqr_tx_confirm_callback(t_Handle app, t_Handle fqr, t_Handle portal,
    uint32_t fqid_off, t_DpaaFD *frame)
{
	struct dtsec_rm_frame_info *fi;
	struct dtsec_softc *sc;
	unsigned int qlen;
	t_DpaaSGTE *sgt0;

	sc = app;

	if (DPAA_FD_GET_STATUS(frame) != 0)
		device_printf(sc->sc_dev, "TX error: 0x%08X\n",
		    DPAA_FD_GET_STATUS(frame));

	/*
	 * We are storing struct dtsec_rm_frame_info in first entry
	 * of scatter-gather table.
	 */
	sgt0 = DPAA_FD_GET_ADDR(frame);
	fi = DPAA_SGTE_GET_ADDR(sgt0);

	/* Free transmitted frame */
	m_freem(fi->fi_mbuf);
	dtsec_rm_fi_free(sc, fi);

	qlen = qman_fqr_get_counter(sc->sc_tx_conf_fqr, 0,
	    e_QM_FQR_COUNTERS_FRAME);

	if (qlen == 0) {
		DTSEC_LOCK(sc);

		if (sc->sc_tx_fqr_full) {
			sc->sc_tx_fqr_full = 0;
			dtsec_rm_if_start_locked(sc);
		}

		DTSEC_UNLOCK(sc);
	}

	return (e_RX_STORE_RESPONSE_CONTINUE);
}

void
dtsec_rm_fqr_rx_free(struct dtsec_softc *sc)
{

	if (sc->sc_rx_fqr)
		qman_fqr_free(sc->sc_rx_fqr);
}

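/*
 * Create the default RX frame queue on the pool channel and register the
 * RX callback.  The resulting FQID is later handed to the FMan RX port in
 * dtsec_rm_fm_port_rx_init().
 */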
int
dtsec_rm_fqr_rx_init(struct dtsec_softc *sc)
{
	t_Error error;
	t_Handle fqr;

	/* Default Frame Queue */
	fqr = qman_fqr_create(1, DTSEC_RM_FQR_RX_CHANNEL, DTSEC_RM_FQR_RX_WQ,
	    false, 0, false, false, true, false, 0, 0, 0);
	if (fqr == NULL) {
		device_printf(sc->sc_dev, "could not create default RX queue\n");
		return (EIO);
	}

	sc->sc_rx_fqr = fqr;
	sc->sc_rx_fqid = qman_fqr_get_base_fqid(fqr);

	error = qman_fqr_register_cb(fqr, dtsec_rm_fqr_rx_callback, sc);
	if (error != E_OK) {
		device_printf(sc->sc_dev, "could not register RX callback\n");
		dtsec_rm_fqr_rx_free(sc);
		return (EIO);
	}

	return (0);
}

void
dtsec_rm_fqr_tx_free(struct dtsec_softc *sc)
{

	if (sc->sc_tx_fqr)
		qman_fqr_free(sc->sc_tx_fqr);

	if (sc->sc_tx_conf_fqr)
		qman_fqr_free(sc->sc_tx_conf_fqr);
}

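/*
 * Create the TX frame queue on the port's dedicated QMan channel plus the
 * TX confirmation queue, and register the confirmation callback.  The
 * confirmation FQID is handed to the FMan TX port in
 * dtsec_rm_fm_port_tx_init().
 */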
int
dtsec_rm_fqr_tx_init(struct dtsec_softc *sc)
{
	t_Error error;
	t_Handle fqr;

	/* TX Frame Queue */
	fqr = qman_fqr_create(1, sc->sc_port_tx_qman_chan,
	    DTSEC_RM_FQR_TX_WQ, false, 0, false, false, true, false, 0, 0, 0);
	if (fqr == NULL) {
		device_printf(sc->sc_dev, "could not create default TX queue\n");
		return (EIO);
	}

	sc->sc_tx_fqr = fqr;

	/* TX Confirmation Frame Queue */
	fqr = qman_fqr_create(1, DTSEC_RM_FQR_TX_CONF_CHANNEL,
	    DTSEC_RM_FQR_TX_CONF_WQ, false, 0, false, false, true, false, 0, 0,
	    0);
	if (fqr == NULL) {
		device_printf(sc->sc_dev, "could not create TX confirmation "
		    "queue\n");
		dtsec_rm_fqr_tx_free(sc);
		return (EIO);
	}

	sc->sc_tx_conf_fqr = fqr;
	sc->sc_tx_conf_fqid = qman_fqr_get_base_fqid(fqr);

	error = qman_fqr_register_cb(fqr, dtsec_rm_fqr_tx_confirm_callback, sc);
	if (error != E_OK) {
		device_printf(sc->sc_dev, "could not register TX confirmation "
		    "callback\n");
		dtsec_rm_fqr_tx_free(sc);
		return (EIO);
	}

	return (0);
}
/** @} */


/**
 * @group dTSEC IFnet routines.
 * @{
 */
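/*
 * Drain the interface send queue.  For each packet, a scatter-gather table
 * is built in the frame info structure: a zero-length entry holding the
 * frame info pointer (recovered later by the TX confirmation callback),
 * followed by entries mapping the mbuf data, split at page boundaries.
 * The resulting frame descriptor is enqueued on the TX frame queue;
 * transmission pauses once the queue reaches DTSEC_MAX_TX_QUEUE_LEN and is
 * resumed from the confirmation callback when the queue drains.
 */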
void
dtsec_rm_if_start_locked(struct dtsec_softc *sc)
{
	vm_size_t dsize, psize, ssize;
	struct dtsec_rm_frame_info *fi;
	unsigned int qlen, i;
	struct mbuf *m0, *m;
	vm_offset_t vaddr;
	t_DpaaFD fd;

	DTSEC_LOCK_ASSERT(sc);
	/* TODO: IFF_DRV_OACTIVE */

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) == 0)
		return;

	if ((if_getdrvflags(sc->sc_ifnet) & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
		return;

	while (!if_sendq_empty(sc->sc_ifnet)) {
		/* Check length of the TX queue */
		qlen = qman_fqr_get_counter(sc->sc_tx_fqr, 0,
		    e_QM_FQR_COUNTERS_FRAME);

		if (qlen >= DTSEC_MAX_TX_QUEUE_LEN) {
			sc->sc_tx_fqr_full = 1;
			return;
		}

		fi = dtsec_rm_fi_alloc(sc);
		if (fi == NULL)
			return;

		m0 = if_dequeue(sc->sc_ifnet);
		if (m0 == NULL) {
			dtsec_rm_fi_free(sc, fi);
			return;
		}

		i = 0;
		m = m0;
		psize = 0;
		dsize = 0;
		fi->fi_mbuf = m0;
		while (m && i < DPAA_NUM_OF_SG_TABLE_ENTRY) {
			if (m->m_len == 0) {
				/* Skip empty mbufs, advancing the chain. */
				m = m->m_next;
				continue;
			}

			/*
			 * First entry in scatter-gather table is used to keep
			 * pointer to frame info structure.
			 */
			DPAA_SGTE_SET_ADDR(&fi->fi_sgt[i], (void *)fi);
			DPAA_SGTE_SET_LENGTH(&fi->fi_sgt[i], 0);

			DPAA_SGTE_SET_EXTENSION(&fi->fi_sgt[i], 0);
			DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i], 0);
			DPAA_SGTE_SET_BPID(&fi->fi_sgt[i], 0);
			DPAA_SGTE_SET_OFFSET(&fi->fi_sgt[i], 0);
			i++;

			dsize = m->m_len;
			vaddr = (vm_offset_t)m->m_data;
			while (dsize > 0 && i < DPAA_NUM_OF_SG_TABLE_ENTRY) {
				ssize = PAGE_SIZE - (vaddr & PAGE_MASK);
				if (dsize < ssize)
					ssize = dsize;

				DPAA_SGTE_SET_ADDR(&fi->fi_sgt[i],
				    (void *)vaddr);
				DPAA_SGTE_SET_LENGTH(&fi->fi_sgt[i], ssize);

				DPAA_SGTE_SET_EXTENSION(&fi->fi_sgt[i], 0);
				DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i], 0);
				DPAA_SGTE_SET_BPID(&fi->fi_sgt[i], 0);
				DPAA_SGTE_SET_OFFSET(&fi->fi_sgt[i], 0);

				dsize -= ssize;
				vaddr += ssize;
				psize += ssize;
				i++;
			}

			if (dsize > 0)
				break;

			m = m->m_next;
		}

		/* Check if SG table was constructed properly */
		if (m != NULL || dsize != 0) {
			dtsec_rm_fi_free(sc, fi);
			m_freem(m0);
			continue;
		}

		DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i-1], 1);

		DPAA_FD_SET_ADDR(&fd, fi->fi_sgt);
		DPAA_FD_SET_LENGTH(&fd, psize);
		DPAA_FD_SET_FORMAT(&fd, e_DPAA_FD_FORMAT_TYPE_SHORT_MBSF);

		fd.liodn = 0;
		fd.bpid = 0;
		fd.elion = 0;
		DPAA_FD_SET_OFFSET(&fd, 0);
		DPAA_FD_SET_STATUS(&fd, 0);

		DTSEC_UNLOCK(sc);
		if (qman_fqr_enqueue(sc->sc_tx_fqr, 0, &fd) != E_OK) {
			dtsec_rm_fi_free(sc, fi);
			m_freem(m0);
		}
		DTSEC_LOCK(sc);
	}
}
/** @} */