Lines matching "ch" in the DPAA2 channel code
121 struct dpaa2_channel *ch = NULL;
148 ch = malloc(sizeof(struct dpaa2_channel), M_DPAA2_CH, M_WAITOK | M_ZERO);
149 ch->ni_dev = dev;
150 ch->io_dev = iodev;
151 ch->con_dev = condev;
152 ch->id = consc->attr.chan_id;
153 ch->flowid = flowid;
154 ch->tx_frames = 0; /* for debug purposes */
155 ch->tx_dropped = 0; /* for debug purposes */
156 ch->store_sz = 0;
157 ch->store_idx = 0;
158 ch->recycled_n = 0;
159 ch->rxq_n = 0;
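
The fields above are set on a channel object freshly allocated at line 148 with malloc(9) using M_WAITOK | M_ZERO against the driver's private M_DPAA2_CH malloc type, so the explicit zero-assignments mostly serve as documentation. A minimal sketch of that allocation pattern, using a hypothetical M_EXAMPLE type and structure rather than the driver's own:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

/* Hypothetical malloc type and structure, for illustration only. */
MALLOC_DEFINE(M_EXAMPLE, "example", "example channel state");

struct example_channel {
        int             id;
        uint32_t        flowid;
};

static struct example_channel *
example_alloc(int id, uint32_t flowid)
{
        struct example_channel *ch;

        /* M_WAITOK may sleep but never returns NULL; M_ZERO clears the memory. */
        ch = malloc(sizeof(*ch), M_EXAMPLE, M_WAITOK | M_ZERO);
        ch->id = id;
        ch->flowid = flowid;
        return (ch);
}

static void
example_free(struct example_channel *ch)
{
        free(ch, M_EXAMPLE);
}
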
161 NET_TASK_INIT(&ch->cleanup_task, 0, cleanup_task_fn, ch);
162 NET_TASK_INIT(&ch->bp_task, 0, dpaa2_chan_bp_task, ch);
164 ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
165 taskqueue_thread_enqueue, &ch->cleanup_tq);
166 taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
167 &iosc->cpu_mask, "dpaa2_ch%d cleanup", ch->id);
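
Lines 161-167 are the usual taskqueue(9) setup: initialize the deferred tasks, create a queue that feeds its own thread via taskqueue_thread_enqueue(), and pin that thread to the I/O portal's CPU set at PI_NET priority. A hedged sketch of the same pattern with invented names (example_softc, example_task_fn); the driver itself uses NET_TASK_INIT, the network-epoch variant of TASK_INIT:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/cpuset.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

struct example_softc {
        struct task              cleanup_task;
        struct taskqueue        *cleanup_tq;
        cpuset_t                 cpu_mask;
        int                      id;
};

static void
example_task_fn(void *arg, int pending)
{
        /* Deferred cleanup work runs here, in the taskqueue thread. */
}

static void
example_taskqueue_setup(struct example_softc *sc)
{
        TASK_INIT(&sc->cleanup_task, 0, example_task_fn, sc);
        sc->cleanup_tq = taskqueue_create("example cleanup", M_WAITOK,
            taskqueue_thread_enqueue, &sc->cleanup_tq);
        taskqueue_start_threads_cpuset(&sc->cleanup_tq, 1, PI_NET,
            &sc->cpu_mask, "example%d cleanup", sc->id);
}
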
169 error = dpaa2_chan_setup_dma(dev, ch, sc->buf_align);
175 mtx_init(&ch->xmit_mtx, "dpaa2_ch_xmit", NULL, MTX_DEF);
177 ch->xmit_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
178 &ch->xmit_mtx);
179 if (ch->xmit_br == NULL) {
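
The transmit path at lines 175-179 pairs a buf_ring(9) with a dedicated mutex; since the ring is allocated with M_NOWAIT, the NULL check is required. A small setup/teardown sketch with hypothetical names, mirroring the four-argument buf_ring_alloc() call shown above (the signature has varied across FreeBSD versions):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/buf_ring.h>

#define EXAMPLE_BUFRING_SZ      4096    /* must be a power of two */

static struct buf_ring *
example_ring_setup(struct mtx *mtxp)
{
        struct buf_ring *br;

        mtx_init(mtxp, "example_xmit", NULL, MTX_DEF);
        br = buf_ring_alloc(EXAMPLE_BUFRING_SZ, M_DEVBUF, M_NOWAIT, mtxp);
        if (br == NULL)
                mtx_destroy(mtxp);      /* allocation failed; caller sees NULL */
        return (br);
}

static void
example_ring_teardown(struct buf_ring *br, struct mtx *mtxp)
{
        /* Drain anything still queued; a real driver would free each mbuf. */
        while (buf_ring_dequeue_sc(br) != NULL)
                continue;
        buf_ring_free(br, M_DEVBUF);
        mtx_destroy(mtxp);
}
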
185 DPAA2_BUF_INIT(&ch->store);
188 ctx = &ch->ctx;
191 ctx->fq_chan_id = ch->id;
192 ctx->io_dev = ch->io_dev;
193 ctx->channel = ch;
194 error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, ctx);
214 error = dpaa2_buf_seed_pool(dev, bpdev, ch, DPAA2_NI_BUFS_INIT,
221 error = dpaa2_chan_alloc_storage(dev, ch, DPAA2_ETH_STORE_SIZE,
228 ch->store_sz = DPAA2_ETH_STORE_FRAMES;
232 error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_TX_CONF);
238 error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_RX);
247 "priorities=%d\n", ioinfo->id, coninfo->id, ch->id,
251 *channel = ch;
259 if (ch->store.vaddr != NULL) {
260 bus_dmamem_free(ch->store.dmat, ch->store.vaddr, ch->store.dmap);
262 if (ch->store.dmat != NULL) {
263 bus_dma_tag_destroy(ch->store.dmat);
265 ch->store.dmat = NULL;
266 ch->store.vaddr = NULL;
267 ch->store.paddr = 0;
268 ch->store.nseg = 0;
270 buf_ring_free(ch->xmit_br, M_DEVBUF);
272 mtx_destroy(&ch->xmit_mtx);
274 /* while (taskqueue_cancel(ch->cleanup_tq, &ch->cleanup_task, NULL)) { */
275 /* taskqueue_drain(ch->cleanup_tq, &ch->cleanup_task); */
277 /* taskqueue_free(ch->cleanup_tq); */
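
The commented-out lines 274-277 correspond to the standard taskqueue(9) teardown idiom: cancel the task, drain it if it is already running, then free the queue. As a hedged reference, that idiom looks roughly like this (parameter names are illustrative):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

static void
example_taskqueue_teardown(struct taskqueue *tq, struct task *task)
{
        /* Cancel if still pending; drain while it is actively running. */
        while (taskqueue_cancel(tq, task, NULL) != 0)
                taskqueue_drain(tq, task);
        taskqueue_free(tq);
}
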
291 dpaa2_chan_setup_fq(device_t dev, struct dpaa2_channel *ch,
300 fq = &ch->txc_queue;
301 fq->chan = ch;
302 fq->flowid = ch->flowid;
313 fq = &ch->rx_queues[i];
314 fq->chan = ch;
315 fq->flowid = ch->flowid;
319 ch->rxq_n++;
325 fq->chan = ch;
343 dpaa2_chan_next_frame(struct dpaa2_channel *ch, struct dpaa2_dq **dq)
345 struct dpaa2_buf *buf = &ch->store;
347 struct dpaa2_dq *msg = &msgs[ch->store_idx];
350 ch->store_idx++;
354 ch->store_idx = 0;
361 ch->store_idx = 0;
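
dpaa2_chan_next_frame() consumes dequeue responses from the channel's software store using ch->store_idx as a cursor, rewinding it when the batch ends (or on error). A generic sketch of that cursor pattern; the types and names here are illustrative only, not the driver's:

#include <sys/param.h>

struct example_msg {
        int      is_last;       /* final response of the current dequeue */
        void    *frame;
};

struct example_store {
        struct example_msg      *msgs;
        unsigned int             idx;
};

/*
 * Return the next stored message and advance the cursor; rewind the
 * cursor once the final message of the batch has been consumed.
 */
static struct example_msg *
example_next_frame(struct example_store *st)
{
        struct example_msg *msg = &st->msgs[st->idx];

        st->idx++;
        if (msg->is_last)
                st->idx = 0;
        return (msg);
}
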
372 dpaa2_chan_setup_dma(device_t dev, struct dpaa2_channel *ch,
377 mtx_init(&ch->dma_mtx, "dpaa2_ch_dma_mtx", NULL, MTX_DEF);
391 &ch->rx_dmat);
409 &ch->tx_dmat);
427 &ch->sgt_dmat);
436 bus_dma_tag_destroy(ch->tx_dmat);
438 bus_dma_tag_destroy(ch->rx_dmat);
440 mtx_destroy(&ch->dma_mtx);
441 ch->rx_dmat = NULL;
442 ch->tx_dmat = NULL;
443 ch->sgt_dmat = NULL;
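
dpaa2_chan_setup_dma() creates the rx, tx and sgt DMA tags and, on failure, destroys whatever was already created in reverse order, then NULLs the pointers so a later detach cannot double-free them (lines 436-443). A hedged sketch of one such busdma(9) tag creation, with illustrative alignment and size constants rather than the driver's real constraints:

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

/* Illustrative constraints; the real driver derives them from hardware limits. */
#define EXAMPLE_BUF_ALIGN       64
#define EXAMPLE_BUF_SIZE        2048

static int
example_create_rx_tag(device_t dev, bus_dma_tag_t *tagp)
{
        return (bus_dma_tag_create(
            bus_get_dma_tag(dev),       /* parent tag from the bus */
            EXAMPLE_BUF_ALIGN, 0,       /* alignment, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            EXAMPLE_BUF_SIZE,           /* maxsize */
            1,                          /* nsegments */
            EXAMPLE_BUF_SIZE,           /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            tagp));
}
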
452 dpaa2_chan_alloc_storage(device_t dev, struct dpaa2_channel *ch, bus_size_t size,
455 struct dpaa2_buf *buf = &ch->store;
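
dpaa2_chan_alloc_storage() gives ch->store, the buffer consumed by dpaa2_chan_next_frame(), DMA-able backing memory. A sketch of the usual allocate-then-load sequence behind such a helper, with hypothetical names; the matching unwind (bus_dmamem_free() before bus_dma_tag_destroy()) is what the error path at lines 259-268 performs:

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

/* bus_dmamap_load() callback: record the single physical segment. */
static void
example_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        if (error == 0 && nseg == 1)
                *(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
example_alloc_storage(bus_dma_tag_t tag, bus_size_t size, void **vaddrp,
    bus_dmamap_t *mapp, bus_addr_t *paddrp)
{
        int error;

        error = bus_dmamem_alloc(tag, vaddrp,
            BUS_DMA_ZERO | BUS_DMA_COHERENT, mapp);
        if (error != 0)
                return (error);

        error = bus_dmamap_load(tag, *mapp, *vaddrp, size, example_dmamap_cb,
            paddrp, BUS_DMA_NOWAIT);
        if (error != 0) {
                bus_dmamem_free(tag, *vaddrp, *mapp);
                *vaddrp = NULL;
        }
        return (error);
}
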
520 struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
521 struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
533 error = DPAA2_SWP_QUERY_BP(ch->io_dev, bpsc->attr.bpid, &bpconf);
542 mtx_assert(&ch->dma_mtx, MA_NOTOWNED);
543 mtx_lock(&ch->dma_mtx);
544 (void)dpaa2_buf_seed_pool(ch->ni_dev, bpdev, ch, buf_num,
545 DPAA2_RX_BUF_SIZE, &ch->dma_mtx);
546 mtx_unlock(&ch->dma_mtx);
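
The buffer-pool task at lines 520-546 re-seeds the pool only while holding ch->dma_mtx, first asserting that the calling context does not already own it. A minimal sketch of that lock discipline around a refill routine (names hypothetical):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Hypothetical refill routine protected by the channel's DMA mutex. */
static void
example_refill(struct mtx *dma_mtx, int buf_num)
{
        mtx_assert(dma_mtx, MA_NOTOWNED);
        mtx_lock(dma_mtx);
        /* ... allocate and release buf_num buffers to the pool ... */
        mtx_unlock(dma_mtx);
}
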