Lines Matching full:cb: every use of the command-buffer pointer cb in FreeBSD's NVMe over Fabrics TCP transport (nvmf_tcp.c)
179 struct nvmf_tcp_command_buffer *cb;
181 cb = malloc(sizeof(*cb), M_NVMF_TCP, M_WAITOK);
182 cb->qp = qp;
183 cb->io = *io;
184 cb->data_offset = data_offset;
185 cb->data_len = data_len;
186 cb->data_xfered = 0;
187 refcount_init(&cb->refs, 1);
188 cb->error = 0;
189 cb->cid = cid;
190 cb->ttag = 0;
191 cb->tc = NULL;
193 return (cb);
197 tcp_hold_command_buffer(struct nvmf_tcp_command_buffer *cb)
199 refcount_acquire(&cb->refs);
203 tcp_free_command_buffer(struct nvmf_tcp_command_buffer *cb)
205 nvmf_complete_io_request(&cb->io, cb->data_xfered, cb->error);
206 if (cb->tc != NULL)
207 tcp_release_capsule(cb->tc);
208 free(cb, M_NVMF_TCP);
212 tcp_release_command_buffer(struct nvmf_tcp_command_buffer *cb)
214 if (refcount_release(&cb->refs))
215 tcp_free_command_buffer(cb);
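
The fragments at lines 179-215 above form the command buffer's reference-counted lifecycle: the buffer is born with one reference, each additional user takes a hold, and the final release completes the I/O and frees the memory. A minimal userland sketch of that pattern, using C11 atomics in place of the kernel's refcount(9) API; all names below are illustrative, not taken from the driver:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct command_buffer {
        atomic_uint refs;   /* one reference is held by the creator */
        int error;          /* sticky error, reported at final release */
    };

    static struct command_buffer *
    cb_alloc(void)
    {
        /* Allocation assumed to succeed, mirroring malloc(9) with M_WAITOK. */
        struct command_buffer *cb = calloc(1, sizeof(*cb));

        atomic_init(&cb->refs, 1);      /* cf. refcount_init(&cb->refs, 1) */
        return (cb);
    }

    static void
    cb_hold(struct command_buffer *cb)
    {
        atomic_fetch_add(&cb->refs, 1); /* cf. refcount_acquire() */
    }

    static void
    cb_release(struct command_buffer *cb)
    {
        /* Last one out frees; cf. tcp_release/tcp_free_command_buffer(). */
        if (atomic_fetch_sub(&cb->refs, 1) == 1)
            free(cb);
    }

Keeping free separate from release matters here because the mbuf external-storage path (line 857 below) calls the free routine directly once the shared count it participates in reaches zero.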
220 struct nvmf_tcp_command_buffer *cb)
223 TAILQ_INSERT_HEAD(&list->head, cb, link);
230 struct nvmf_tcp_command_buffer *cb;
233 TAILQ_FOREACH(cb, &list->head, link) {
234 if (cb->cid == cid && cb->ttag == ttag)
235 return (cb);
242 struct nvmf_tcp_command_buffer *cb)
245 TAILQ_REMOVE(&list->head, cb, link);
252 struct nvmf_tcp_command_buffer *cb;
255 cb = tcp_find_command_buffer(list, cid, ttag);
256 if (cb != NULL) {
257 tcp_remove_command_buffer(list, cb);
259 tcp_release_command_buffer(cb);
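
Lines 220-259 are the list plumbing: pending buffers sit on a tail queue, lookups scan for a (cid, ttag) pair, and a "purge" is find + remove + release. A compilable userland sketch of the lookup, using the BSD <sys/queue.h> macros seen in the fragments; the struct and its fields are placeholders:

    #include <sys/queue.h>
    #include <stddef.h>
    #include <stdint.h>

    struct cbuf {
        uint16_t cid;               /* command identifier from the SQE */
        uint16_t ttag;              /* transfer tag; 0 when not assigned */
        TAILQ_ENTRY(cbuf) link;
    };

    TAILQ_HEAD(cbuf_list, cbuf);

    /* Linear scan keyed on (cid, ttag), as in tcp_find_command_buffer(). */
    static struct cbuf *
    cbuf_find(struct cbuf_list *list, uint16_t cid, uint16_t ttag)
    {
        struct cbuf *cb;

        TAILQ_FOREACH(cb, list, link) {
            if (cb->cid == cid && cb->ttag == ttag)
                return (cb);
        }
        return (NULL);
    }

A linear scan is reasonable here because the queue depth bounds the number of outstanding buffers per queue pair.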
508 struct nvmf_tcp_command_buffer *cb;
513 TAILQ_FOREACH(cb, &qp->rx_buffers.head, link) {
515 if (cb->tc->active_r2ts > qp->maxr2t)
518 cb->tc->pending_r2ts--;
520 TAILQ_REMOVE(&qp->rx_buffers.head, cb, link);
521 return (cb);
526 /* Allocate the next free transfer tag and assign it to cb. */
529 struct nvmf_tcp_command_buffer *cb)
550 cb->tc->active_r2ts++;
552 qp->open_ttags[ttag] = cb;
558 cb->ttag = ttag;
587 struct nvmf_tcp_command_buffer *cb)
592 MPASS(qp->open_ttags[cb->ttag] == cb);
595 qp->open_ttags[cb->ttag] = NULL;
597 cb->tc->active_r2ts--;
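
Lines 508-597 are the controller-side R2T bookkeeping: an rx buffer becomes eligible for an R2T only once its capsule's active_r2ts count is within the negotiated limit, a free transfer tag is then claimed from the open_ttags[] array, and the slot is asserted and cleared when the transfer retires. A sketch of the tag-array idea; the array size and the rotating search hint are assumptions, not the driver's exact scheme:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define NUM_TTAGS 128               /* illustrative capacity */

    struct cbuf;                        /* opaque in this sketch */

    struct qpair {
        struct cbuf *open_ttags[NUM_TTAGS]; /* ttag -> in-flight buffer */
        uint16_t next_ttag;                 /* rotating search hint, assumed */
    };

    /* Claim the next free transfer tag; -1 means all tags are in use. */
    static int
    ttag_alloc(struct qpair *qp, struct cbuf *cb)
    {
        for (unsigned i = 0; i < NUM_TTAGS; i++) {
            uint16_t ttag = (qp->next_ttag + i) % NUM_TTAGS;

            if (qp->open_ttags[ttag] == NULL) {
                qp->open_ttags[ttag] = cb;
                qp->next_ttag = (ttag + 1) % NUM_TTAGS;
                return (ttag);
            }
        }
        return (-1);
    }

    /* Retire a tag; the slot must still point at the same buffer. */
    static void
    ttag_free(struct qpair *qp, struct cbuf *cb, uint16_t ttag)
    {
        assert(qp->open_ttags[ttag] == cb);     /* cf. the MPASS() at 592 */
        qp->open_ttags[ttag] = NULL;
    }

Indexing by tag makes the H2C DATA lookup at line 668 an O(1) array access rather than a list scan.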
640 struct nvmf_tcp_command_buffer *cb;
668 cb = qp->open_ttags[ttag];
669 if (cb == NULL) {
678 MPASS(cb->ttag == ttag);
682 nvmf_tcp_send_next_r2t(qp, cb);
683 cb->error = EINTEGRITY;
684 tcp_release_command_buffer(cb);
701 if (data_offset < cb->data_offset ||
702 data_offset + data_len > cb->data_offset + cb->data_len) {
711 if (data_offset != cb->data_offset + cb->data_xfered) {
720 if ((cb->data_xfered + data_len == cb->data_len) !=
730 cb->data_xfered += data_len;
731 data_offset -= cb->data_offset;
732 if (cb->data_xfered == cb->data_len) {
733 nvmf_tcp_send_next_r2t(qp, cb);
735 tcp_hold_command_buffer(cb);
739 mbuf_copyto_io(pdu->m, pdu->hdr->pdo, data_len, &cb->io, data_offset);
741 tcp_release_command_buffer(cb);
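
Lines 640-741 handle an incoming H2C DATA PDU on the controller side: the transfer tag indexes straight into open_ttags[], and the payload is validated against the buffer's window before mbuf_copyto_io() copies it. The three checks visible in the fragments are worth spelling out; a standalone sketch, with field names following the fragments and the function itself invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    struct xfer {
        uint32_t data_offset;   /* where this buffer starts in the command data */
        uint32_t data_len;      /* total bytes expected */
        uint32_t data_xfered;   /* bytes received so far */
    };

    /* Returns true if the PDU is acceptable, false to fail the transfer. */
    static bool
    h2c_data_ok(const struct xfer *x, uint32_t data_offset, uint32_t data_len,
        bool last_pdu)
    {
        /* 1. The PDU must lie entirely inside the buffer's window (701). */
        if (data_offset < x->data_offset ||
            data_offset + data_len > x->data_offset + x->data_len)
            return (false);

        /* 2. Data must arrive strictly in order, no gaps or overlap (711). */
        if (data_offset != x->data_offset + x->data_xfered)
            return (false);

        /* 3. LAST_PDU must be set iff this PDU completes the transfer (720). */
        if ((x->data_xfered + data_len == x->data_len) != last_pdu)
            return (false);

        return (true);
    }

The C2H DATA path at lines 750-832 below applies the same window, ordering, and last-PDU checks on the host side before copying into the receive buffer.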
750 struct nvmf_tcp_command_buffer *cb;
756 cb = tcp_find_command_buffer(&qp->rx_buffers, c2h->cccid, 0);
757 if (cb == NULL) {
773 cb->error = EINTEGRITY;
774 tcp_remove_command_buffer(&qp->rx_buffers, cb);
776 tcp_release_command_buffer(cb);
793 if (data_offset < cb->data_offset ||
794 data_offset + data_len > cb->data_offset + cb->data_len) {
803 if (data_offset != cb->data_offset + cb->data_xfered) {
812 if ((cb->data_xfered + data_len == cb->data_len) !=
822 cb->data_xfered += data_len;
823 data_offset -= cb->data_offset;
824 if (cb->data_xfered == cb->data_len)
825 tcp_remove_command_buffer(&qp->rx_buffers, cb);
827 tcp_hold_command_buffer(cb);
830 mbuf_copyto_io(pdu->m, pdu->hdr->pdo, data_len, &cb->io, data_offset);
832 tcp_release_command_buffer(cb);
855 struct nvmf_tcp_command_buffer *cb = m->m_ext.ext_arg1;
857 tcp_free_command_buffer(cb);
863 struct nvmf_tcp_command_buffer *cb = arg;
868 m_extaddref(m, data, len, &cb->refs, nvmf_tcp_mbuf_done, cb, NULL);
876 struct nvmf_tcp_command_buffer *cb = m->m_ext.ext_arg1;
879 tcp_release_command_buffer(cb);
885 struct nvmf_tcp_command_buffer *cb = arg;
889 m->m_ext.ext_arg1 = cb;
890 tcp_hold_command_buffer(cb);
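
Lines 855-890 connect mbuf lifetimes to the command buffer. When payload is transmitted directly from the buffer's memory, m_extaddref() at line 868 registers cb->refs as the mbufs' shared external reference count, so the buffer cannot go away while the socket layer still holds mbufs pointing into it; nvmf_tcp_mbuf_done() then frees the buffer exactly once, when that count reaches zero. The page-backed variant at lines 876-890 instead takes a plain hold per mbuf and drops it from the ext-free callback. A userland analog of the shared-count idea; every name here is invented:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct backing {
        atomic_uint refs;       /* starts at 1 for the creator */
        char *data;
    };

    struct view {               /* stands in for an mbuf with external storage */
        struct backing *ext;
        char *ptr;
        size_t len;
    };

    /* Each view takes a reference, like m_extaddref() sharing cb->refs. */
    static struct view *
    view_make(struct backing *b, size_t off, size_t len)
    {
        struct view *v = malloc(sizeof(*v));

        atomic_fetch_add(&b->refs, 1);
        v->ext = b;
        v->ptr = b->data + off;
        v->len = len;
        return (v);
    }

    /* Dropping the last reference frees the backing store ("done" callback). */
    static void
    view_free(struct view *v)
    {
        struct backing *b = v->ext;

        free(v);
        if (atomic_fetch_sub(&b->refs, 1) == 1) {
            free(b->data);
            free(b);
        }
    }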
905 nvmf_tcp_command_buffer_mbuf(struct nvmf_tcp_command_buffer *cb,
912 m = memdesc_alloc_ext_mbufs(&cb->io.io_mem, nvmf_tcp_mbuf,
913 nvmf_tcp_mext_pg, cb, M_WAITOK, data_offset, data_len, &len,
945 struct nvmf_tcp_command_buffer *cb;
951 cb = tcp_find_command_buffer(&qp->tx_buffers, r2t->cccid, 0);
952 if (cb == NULL) {
963 if (data_offset != cb->data_xfered) {
977 if (data_offset + data_len > cb->data_len) {
986 cb->data_xfered += data_len;
987 if (cb->data_xfered == cb->data_len)
988 tcp_remove_command_buffer(&qp->tx_buffers, cb);
990 tcp_hold_command_buffer(cb);
1002 m = nvmf_tcp_command_buffer_mbuf(cb, data_offset, todo, &sent,
1011 tcp_release_command_buffer(cb);
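
Lines 945-1011 are the host's R2T handler: the tx buffer is looked up by the command ID carried in the R2T PDU, the requested offset must equal exactly what has already been sent (no out-of-order requests and no re-transmission), the request must not run past the registered buffer, and data_xfered is advanced before the H2C DATA mbuf chain is built. A compact sketch of the validation half; the names are placeholders:

    #include <stdbool.h>
    #include <stdint.h>

    struct txbuf {
        uint32_t data_len;      /* total length registered for the command */
        uint32_t data_xfered;   /* bytes already claimed by earlier R2Ts */
    };

    /* Validate one R2T; on success, account for the data about to be sent. */
    static bool
    r2t_accept(struct txbuf *tx, uint32_t r2t_offset, uint32_t r2t_len)
    {
        /* Only strictly in-order requests are honored (cf. line 963). */
        if (r2t_offset != tx->data_xfered)
            return (false);

        /* The request must stay within the buffer (cf. line 977). */
        if (r2t_offset + r2t_len > tx->data_len)
            return (false);

        tx->data_xfered += r2t_len;
        return (true);
    }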
1222 struct nvmf_tcp_command_buffer *cb;
1229 cb = NULL;
1233 cb = tcp_alloc_command_buffer(qp, &nc->nc_data, 0,
1238 m = nvmf_tcp_command_buffer_mbuf(cb, 0,
1240 cb->data_xfered = nc->nc_data.io_len;
1241 tcp_release_command_buffer(cb);
1244 tcp_add_command_buffer(&qp->tx_buffers, cb);
1248 tcp_add_command_buffer(&qp->rx_buffers, cb);
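
Lines 1222-1248 run when the host transmits a command capsule that carries data: if the payload fits in-capsule, the mbuf chain is built immediately, data_xfered is marked complete, and the buffer is released on the spot; otherwise the buffer is parked on tx_buffers (host-to-controller data, waiting for R2Ts) or on rx_buffers (controller-to-host data, waiting for C2H DATA PDUs). A condensed sketch of that three-way decision; the enum and the in-capsule limit parameter are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    enum data_plan {
        PLAN_IN_CAPSULE,    /* send the payload with the command PDU */
        PLAN_WAIT_R2T,      /* park on tx_buffers until R2Ts arrive */
        PLAN_WAIT_C2H,      /* park on rx_buffers for C2H DATA PDUs */
    };

    static enum data_plan
    plan_for_command(bool host_to_controller, uint32_t io_len,
        uint32_t max_icd)   /* in-capsule data limit from connection setup */
    {
        if (host_to_controller)
            return (io_len <= max_icd ? PLAN_IN_CAPSULE : PLAN_WAIT_R2T);
        return (PLAN_WAIT_C2H);
    }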
1530 struct nvmf_tcp_command_buffer *ncb, *cb;
1566 cb = qp->open_ttags[i];
1567 if (cb != NULL) {
1568 cb->tc->active_r2ts--;
1569 cb->error = ECONNABORTED;
1570 tcp_release_command_buffer(cb);
1577 TAILQ_FOREACH_SAFE(cb, &qp->rx_buffers.head, link, ncb) {
1578 tcp_remove_command_buffer(&qp->rx_buffers, cb);
1581 if (cb->tc != NULL)
1582 cb->tc->pending_r2ts--;
1584 cb->error = ECONNABORTED;
1585 tcp_release_command_buffer(cb);
1591 TAILQ_FOREACH_SAFE(cb, &qp->tx_buffers.head, link, ncb) {
1592 tcp_remove_command_buffer(&qp->tx_buffers, cb);
1594 cb->error = ECONNABORTED;
1595 tcp_release_command_buffer(cb);
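
Lines 1530-1595 are the teardown path: every open transfer tag and every buffer still queued on rx_buffers or tx_buffers is failed with ECONNABORTED and released, with the R2T counters unwound along the way. The loops use TAILQ_FOREACH_SAFE because entries are unlinked mid-walk. A sketch of that purge idiom with the BSD <sys/queue.h> (on glibc systems the _SAFE variant needs libbsd); the types are placeholders:

    #include <sys/queue.h>
    #include <errno.h>
    #include <stdlib.h>

    struct entry {
        int error;
        TAILQ_ENTRY(entry) link;
    };

    TAILQ_HEAD(entry_list, entry);

    /* Fail and release every queued buffer; _SAFE permits removal mid-walk. */
    static void
    purge_all(struct entry_list *list)
    {
        struct entry *e, *next;

        TAILQ_FOREACH_SAFE(e, list, link, next) {
            TAILQ_REMOVE(list, e, link);
            e->error = ECONNABORTED;    /* surfaced when the I/O completes */
            free(e);                    /* stands in for the refcount release */
        }
    }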
1707 struct nvmf_tcp_command_buffer *cb;
1709 cb = tcp_alloc_command_buffer(qp, io, data_offset, io->io_len,
1712 cb->tc = tc;
1726 TAILQ_INSERT_TAIL(&qp->rx_buffers.head, cb, link);
1731 nvmf_tcp_allocate_ttag(qp, cb);
1734 tcp_send_r2t(qp, nc->nc_sqe.cid, cb->ttag, data_offset, io->io_len);
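
Lines 1707-1734 are the controller requesting command data from the host: a buffer is allocated for the I/O range, bound to its capsule via cb->tc, and then either queued on rx_buffers when the capsule already has the maximum number of R2Ts outstanding, or handed a transfer tag so an R2T PDU can go out immediately. A condensed sketch of the gating decision, folding the counter update into the check; the comparison mirrors line 515, where '>' works because MAXR2T is exchanged as a 0's-based value:

    #include <stdbool.h>
    #include <stdint.h>

    struct capsule {
        uint32_t active_r2ts;   /* R2Ts currently outstanding to the host */
        uint32_t pending_r2ts;  /* buffers queued waiting for a free slot */
    };

    /* Returns true if an R2T may be sent now, false if it must queue. */
    static bool
    r2t_slot_available(struct capsule *tc, uint32_t maxr2t)
    {
        if (tc->active_r2ts > maxr2t) { /* 0's-based limit, hence '>' */
            tc->pending_r2ts++;         /* drained as transfers finish */
            return (false);
        }
        tc->active_r2ts++;
        return (true);
    }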