Lines Matching defs:abd
28 * See abd.c for a general overview of the ARC buffered data (ABD).
154 #define abd_for_each_sg(abd, sg, n, i) \
155 for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
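
The macro on lines 154-155 is a thin forwarder: it hands the scatterlist head cached in ABD_SCATTER(abd).abd_sgl to the kernel's for_each_sg() iterator. Below is a minimal user-space model of the same pattern; struct sg, struct my_abd, and every my_* name are invented stand-ins (the later sketches reuse them), and the flat array glosses over chained scatterlists, which the real for_each_sg() follows via sg_next().

    #include <stdio.h>

    struct sg { void *page; unsigned int length; }; /* stand-in for struct scatterlist */
    struct my_abd {                                 /* stand-in for abd_t's scatter view */
            int flags;
            size_t size, offset;
            struct sg *sgl;
            int nents;
            void *linear_buf;                       /* stand-in for ABD_LINEAR_BUF() */
    };

    /* flat-array analogue of for_each_sg(); the kernel walks sg_next() */
    #define my_for_each_sg(sgl, sg, n, i) \
            for ((i) = 0, (sg) = (sgl); (i) < (n); (i)++, (sg)++)

    /* analogue of abd_for_each_sg(): forward the ABD's cached list head */
    #define my_abd_for_each_sg(abd, sg, n, i) \
            my_for_each_sg((abd)->sgl, sg, n, i)

    int main(void) {
            struct sg chunks[3] = {
                    { .length = 4096 }, { .length = 4096 }, { .length = 2048 }
            };
            struct my_abd abd = { .size = 10240, .sgl = chunks, .nents = 3 };
            struct sg *sg;
            int i;

            my_abd_for_each_sg(&abd, sg, abd.nents, i)
                    printf("segment %d: %u bytes\n", i, sg->length);
            return (0);
    }
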
214 abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
215 ASSERT3P(abd, !=, NULL);
218 return (abd);
222 abd_free_struct_impl(abd_t *abd)
224 kmem_cache_free(abd_cache, abd);
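
Lines 214-224 show that abd_t headers come from a dedicated slab cache: allocation and free are symmetric wrappers around abd_cache, with KM_PUSHPAGE keeping the allocation safe in the writeback path. A hedged user-space analogue, with calloc/free standing in for the kmem cache calls:

    #include <assert.h>
    #include <stdlib.h>

    static struct my_abd *my_abd_alloc_struct(void) {
            /* models kmem_cache_alloc(abd_cache, KM_PUSHPAGE) */
            struct my_abd *abd = calloc(1, sizeof (*abd));

            assert(abd != NULL);        /* ASSERT3P(abd, !=, NULL) */
            return (abd);
    }

    static void my_abd_free_struct(struct my_abd *abd) {
            free(abd);                  /* models kmem_cache_free(abd_cache, abd) */
    }
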
270 abd_alloc_chunks(abd_t *abd, size_t size)
344 ASSERT0(ABD_SCATTER(abd).abd_offset);
368 abd->abd_flags |= ABD_FLAG_LINEAR;
369 abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
370 abd->abd_u.abd_linear.abd_sgl = table.sgl;
371 ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
374 abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
378 abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
381 ABD_SCATTER(abd).abd_sgl = table.sgl;
382 ABD_SCATTER(abd).abd_nents = table.nents;
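
Lines 344-382 are the tail of the first abd_alloc_chunks() variant: after the sg table is assembled, the result is classified. A single-entry table means the pages landed physically contiguous, so the ABD is promoted to a linear page ABD; otherwise it stays a scatter ABD flagged ABD_FLAG_MULTI_CHUNK, plus ABD_FLAG_MULTI_ZONE when its pages came from more than one memory zone. A sketch of just that classification, reusing the stand-ins above; the MY_FLAG_* values are inventions mirroring the ABD_FLAG_* names, and spans_zones abstracts the zone bookkeeping the real code does while allocating:

    enum {
            MY_FLAG_LINEAR      = 1 << 0,
            MY_FLAG_LINEAR_PAGE = 1 << 1,
            MY_FLAG_MULTI_ZONE  = 1 << 2,
            MY_FLAG_MULTI_CHUNK = 1 << 3,
    };

    static void my_classify_alloc(struct my_abd *abd, int nents, int spans_zones) {
            if (nents == 1) {
                    /* one physically contiguous chunk: promote to linear page */
                    abd->flags |= MY_FLAG_LINEAR | MY_FLAG_LINEAR_PAGE;
            } else {
                    abd->flags |= MY_FLAG_MULTI_CHUNK;
                    if (spans_zones)
                            abd->flags |= MY_FLAG_MULTI_ZONE;
                    abd->nents = nents;   /* lines 381-382 cache sgl and nents */
            }
    }
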
393 abd_alloc_chunks(abd_t *abd, size_t size)
408 ABD_SCATTER(abd).abd_sgl = table.sgl;
409 ABD_SCATTER(abd).abd_nents = nr_pages;
411 abd_for_each_sg(abd, sg, nr_pages, i) {
424 abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
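
The second abd_alloc_chunks() variant (lines 393-424) takes the simpler route: allocate an sg table with one entry per page, then walk it with abd_for_each_sg() attaching a freshly allocated page to each slot, and flag ABD_FLAG_MULTI_CHUNK only when more than one page was needed. A model of that fill loop, reusing the earlier stand-ins, with my_sg_set_page() standing in for the kernel's sg_set_page() and the retry loops around allocation failures omitted:

    static void my_sg_set_page(struct sg *sg, void *page, unsigned int len) {
            sg->page = page;            /* models sg_set_page(sg, page, len, 0) */
            sg->length = len;
    }

    static void my_fill_table(struct my_abd *abd, void **pages, int nr_pages,
        unsigned int page_size) {
            struct sg *sg;
            int i;

            abd->nents = nr_pages;      /* lines 408-409 */
            my_abd_for_each_sg(abd, sg, nr_pages, i)
                    my_sg_set_page(sg, pages[i], page_size);

            if (nr_pages > 1)
                    abd->flags |= MY_FLAG_MULTI_CHUNK;   /* line 424 */
    }
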
434 abd_free_sg_table(abd_t *abd)
438 table.sgl = ABD_SCATTER(abd).abd_sgl;
439 table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
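
abd_free_sg_table() (lines 434-439) has to rebuild a struct sg_table on the stack because the ABD caches only the sgl pointer and entry count, not the wrapper that sg_free_table() expects. A stand-in sketch of that reconstruction; my_sg_free_table() trivially models sg_free_table() by assuming the entries were one malloc'd run:

    #include <stdlib.h>

    struct my_sg_table { struct sg *sgl; int nents, orig_nents; };

    static void my_sg_free_table(struct my_sg_table *t) {
            free(t->sgl);               /* stand-in for sg_free_table() */
    }

    static void my_abd_free_sg_table(struct my_abd *abd) {
            struct my_sg_table table;

            /* rebuild the wrapper the table allocator originally returned */
            table.sgl = abd->sgl;
            table.nents = table.orig_nents = abd->nents;
            my_sg_free_table(&table);
    }
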
444 abd_free_chunks(abd_t *abd)
448 int nr_pages = ABD_SCATTER(abd).abd_nents;
451 if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
454 if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
461 if (!abd_is_from_pages(abd)) {
462 abd_for_each_sg(abd, sg, nr_pages, i) {
472 abd_free_sg_table(abd);
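
abd_free_chunks() (lines 444-472) mirrors allocation: drop the MULTI_ZONE/MULTI_CHUNK statistics that allocation bumped, release each page, then tear down the sg table; the page-release loop is skipped when the ABD merely borrowed caller pages (abd_is_from_pages()), since it does not own them. A hedged model of that control flow, with my_put_page() standing in for the kernel's page release and MY_FLAG_FROM_PAGES invented to mirror ABD_FLAG_FROM_PAGES:

    #include <stdlib.h>

    enum { MY_FLAG_FROM_PAGES = 1 << 4 };

    static void my_put_page(void *page) {
            free(page);                 /* stand-in for __free_pages() */
    }

    static void my_abd_free_chunks(struct my_abd *abd) {
            struct sg *sg;
            int i, nr_pages = abd->nents;

            /* (stat decrements for MULTI_ZONE/MULTI_CHUNK omitted here) */

            if (!(abd->flags & MY_FLAG_FROM_PAGES)) {
                    /* the ABD owns its pages: release each one (lines 461-462) */
                    my_abd_for_each_sg(abd, sg, nr_pages, i)
                            my_put_page(sg->page);
            }
            my_abd_free_sg_table(abd); /* line 472 */
    }
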
529 abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
532 int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
535 ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
540 ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
547 abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
552 ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
555 ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
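
Lines 529-555 split the statistics by ABD type. Linear stats just add or subtract abd_size, while the scatter stats also track "waste": the slack between abd_size and its page-rounded allocation (line 532). A standalone check of that arithmetic; MY_P2ROUNDUP writes out one common power-of-two round-up form of P2ROUNDUP and assumes, as PAGESIZE guarantees, that the alignment is a power of two:

    #include <stdio.h>
    #include <stddef.h>

    #define MY_PAGESIZE ((size_t)4096)
    /* round x up to a multiple of a; valid only for power-of-two a */
    #define MY_P2ROUNDUP(x, a) (-(-(x) & -(a)))

    int main(void) {
            size_t size = 5000;
            size_t rounded = MY_P2ROUNDUP(size, MY_PAGESIZE);
            size_t waste = rounded - size;   /* models line 532 */

            /* prints: size=5000 rounded=8192 waste=3192 */
            printf("size=%zu rounded=%zu waste=%zu\n", size, rounded, waste);
            return (0);
    }
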
560 abd_verify_scatter(abd_t *abd)
562 ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
563 ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
564 ABD_SCATTER(abd).abd_sgl->length);
568 size_t n = ABD_SCATTER(abd).abd_nents;
571 abd_for_each_sg(abd, sg, n, i) {
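
abd_verify_scatter() (lines 560-571) pins down the scatter invariants: a positive entry count, a starting offset that lands inside the first segment, and a backing page behind every segment. The same checks as an assertion sketch over the stand-in types:

    #include <assert.h>

    static void my_verify_scatter(const struct my_abd *abd) {
            assert(abd->nents > 0);                      /* line 562 */
            assert(abd->offset < abd->sgl[0].length);    /* lines 563-564 */
            for (int i = 0; i < abd->nents; i++)
                    assert(abd->sgl[i].page != NULL);    /* loop at line 571 */
    }
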
696 abd_free_linear_page(abd_t *abd)
699 struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
702 if (abd_is_from_pages(abd))
705 abd_update_scatter_stats(abd, ABDSTAT_DECR);
707 abd->abd_flags &= ~ABD_FLAG_LINEAR;
708 abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
709 ABD_SCATTER(abd).abd_nents = 1;
710 ABD_SCATTER(abd).abd_offset = 0;
711 ABD_SCATTER(abd).abd_sgl = sg;
712 abd_free_chunks(abd);
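
abd_free_linear_page() (lines 696-712) is the inverse of the single-chunk promotion in abd_alloc_chunks(): it strips the LINEAR/LINEAR_PAGE flags and re-exposes the scatterlist saved at allocation time so the common abd_free_chunks() path can release the page. A sketch of that field shuffle with the stand-ins; the from-pages unmapping branch at line 702 is reduced to a comment:

    static void my_free_linear_page(struct my_abd *abd) {
            struct sg *sg = abd->sgl;   /* scatterlist saved at allocation time */

            /* from-pages ABDs unmark the user page here instead of
             * decrementing the scatter stats (lines 702-705) */

            abd->flags &= ~(MY_FLAG_LINEAR | MY_FLAG_LINEAR_PAGE);
            abd->nents = 1;             /* re-expose as a one-entry scatter ABD */
            abd->offset = 0;
            abd->sgl = sg;
            my_abd_free_chunks(abd);    /* common free path does the rest */
    }
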
735 abd_t *abd = abd_alloc_struct(0);
736 abd->abd_flags |= ABD_FLAG_FROM_PAGES | ABD_FLAG_OWNER;
737 abd->abd_size = size;
757 abd->abd_flags |= ABD_FLAG_LINEAR | ABD_FLAG_LINEAR_PAGE;
758 abd->abd_u.abd_linear.abd_sgl = table.sgl;
760 ABD_LINEAR_BUF(abd) = sg_virt(table.sgl);
763 abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
765 ABD_SCATTER(abd).abd_offset = offset;
766 ABD_SCATTER(abd).abd_sgl = table.sgl;
767 ABD_SCATTER(abd).abd_nents = table.nents;
769 ASSERT0(ABD_SCATTER(abd).abd_offset);
772 return (abd);
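
Lines 735-772 build an ABD directly over caller-supplied pages (the path Direct I/O uses): the header is allocated empty, flagged ABD_FLAG_FROM_PAGES | ABD_FLAG_OWNER, and then, echoing abd_alloc_chunks(), a single-entry table becomes a linear page ABD (the kernel's sg_virt() folds any intra-page offset into the returned pointer) while a multi-entry table becomes a scatter ABD that must start page-aligned (the ASSERT0 at line 769). A sketch of that decision, reusing the stand-ins and omitting the OWNER flag:

    #include <assert.h>

    static void my_adopt_pages(struct my_abd *abd, struct sg *sgl, int nents,
        size_t size, size_t offset) {
            abd->flags |= MY_FLAG_FROM_PAGES;   /* pages borrowed, not owned */
            abd->size = size;
            abd->sgl = sgl;

            if (nents == 1) {
                    /* single entry: expose as linear, like lines 757-760 */
                    abd->flags |= MY_FLAG_LINEAR | MY_FLAG_LINEAR_PAGE;
            } else {
                    abd->flags |= MY_FLAG_MULTI_CHUNK;
                    abd->offset = offset;
                    abd->nents = nents;
                    assert(offset == 0);        /* models line 769 */
            }
    }
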
793 abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
805 if (abd == NULL)
806 abd = abd_alloc_struct(0);
820 ABD_SCATTER(abd).abd_sgl = sg;
821 ABD_SCATTER(abd).abd_offset = new_offset;
822 ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;
825 abd->abd_flags |= ABD_FLAG_FROM_PAGES;
827 return (abd);
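
abd_get_offset_scatter() (lines 793-827) creates a zero-copy child view: starting from the parent's own abd_offset plus the requested off, it walks the scatterlist until the remainder falls inside one segment, then points the child at that segment with the residue as its abd_offset and the remaining entries as its abd_nents, inheriting ABD_FLAG_FROM_PAGES from a from-pages parent (line 825) so the child never frees pages it does not own. The walk itself, as a standalone function over a flat segment array:

    #include <stddef.h>

    /*
     * Find the segment containing 'off'; the leftover intra-segment offset
     * comes back through *new_offset. Assumes off is less than the total
     * length, which the callers assert.
     */
    static int my_find_segment(const struct sg *segs, size_t off,
        size_t *new_offset) {
            int i = 0;

            while (off >= segs[i].length) {     /* skip whole segments */
                    off -= segs[i].length;
                    i++;
            }
            *new_offset = off;
            return (i);     /* child view uses segs + i and nents - i */
    }
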
834 abd_iter_init(struct abd_iter *aiter, abd_t *abd)
836 ASSERT(!abd_is_gang(abd));
837 abd_verify(abd);
839 aiter->iter_abd = abd;
840 if (!abd_is_linear(abd)) {
841 aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
842 aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
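
abd_iter_init() (lines 834-842) primes a cursor for chunk-by-chunk iteration: gang ABDs are rejected up front (the caller must iterate children itself), and only scatter ABDs get their cached scatterlist head and starting offset loaded; for a linear ABD a zeroed cursor already means "offset 0 into the flat buffer". Stand-in sketch:

    #include <string.h>

    struct my_iter { struct my_abd *abd; size_t offset; struct sg *sg; };

    static void my_iter_init(struct my_iter *it, struct my_abd *abd) {
            memset(it, 0, sizeof (*it));        /* linear: zeroed cursor suffices */
            it->abd = abd;
            if (!(abd->flags & MY_FLAG_LINEAR)) {
                    it->offset = abd->offset;   /* scatter view may start mid-list */
                    it->sg = abd->sgl;
            }
    }
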
961 abd_borrow_buf(abd_t *abd, size_t n)
964 abd_verify(abd);
965 ASSERT3U(abd->abd_size, >=, n);
972 if (abd_is_from_pages(abd)) {
974 } else if (abd_is_linear(abd)) {
975 buf = abd_to_buf(abd);
981 (void) zfs_refcount_add_many(&abd->abd_children, n, buf);
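
abd_borrow_buf() (lines 961-981) lends the caller a flat buffer: a plain linear ABD can hand out its own buffer, but scatter ABDs need a temporary flat allocation, and from-pages ABDs get one even when linear, because user pages backing Direct I/O can change underneath ZFS at any time. Every loan is recorded against abd_children (line 981) so an unreturned buffer trips the refcount. A model of the three-way choice, with malloc standing in for zio_buf_alloc():

    #include <stdlib.h>

    static void *my_borrow_buf(struct my_abd *abd, size_t n) {
            void *buf;

            if (abd->flags & MY_FLAG_FROM_PAGES) {
                    buf = malloc(n);            /* user pages are volatile: bounce */
            } else if (abd->flags & MY_FLAG_LINEAR) {
                    buf = abd->linear_buf;      /* lend the flat buffer itself */
            } else {
                    buf = malloc(n);            /* scatter: flat copy target */
            }
            /* real code: zfs_refcount_add_many(&abd->abd_children, n, buf) */
            return (buf);
    }
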
987 abd_borrow_buf_copy(abd_t *abd, size_t n)
989 void *buf = abd_borrow_buf(abd, n);
998 if (!abd_is_linear(abd) || abd_is_from_pages(abd)) {
999 abd_copy_to_buf(buf, abd, n);
1007 * buf to be copied back to abd, use abd_return_buf_copy() instead. If the
1018 abd_return_buf(abd_t *abd, void *buf, size_t n)
1020 abd_verify(abd);
1021 ASSERT3U(abd->abd_size, >=, n);
1023 (void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
1025 if (abd_is_from_pages(abd)) {
1027 } else if (abd_is_linear(abd)) {
1028 ASSERT3P(buf, ==, abd_to_buf(abd));
1029 } else if (abd_is_gang(abd)) {
1039 for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
1041 cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
1051 ASSERT0(abd_cmp_buf(abd, buf, n));
1057 abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
1059 if (!abd_is_linear(abd) || abd_is_from_pages(abd)) {
1060 abd_copy_from_buf(abd, buf, n);
1062 abd_return_buf(abd, buf, n);
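
Lines 987-1062 round out the loan protocol. abd_borrow_buf_copy() additionally fills the loaned buffer with the ABD's current contents whenever the buffer is a bounce copy (non-linear or from-pages, line 998), and abd_return_buf_copy() writes it back before releasing; the plain abd_return_buf() instead asserts that the borrower left a read-only loan untouched (the abd_cmp_buf() at line 1051). A hypothetical read-modify-write caller would pair the copy variants; my_transform_abd is an invention, and compiling it needs the ZFS headers that declare the real functions:

    /* flatten, modify in place, write back */
    static void my_transform_abd(abd_t *abd, size_t n) {
            void *buf = abd_borrow_buf_copy(abd, n);    /* flat, filled-in view */

            /* ... modify buf[0 .. n) here ... */

            abd_return_buf_copy(abd, buf, n);           /* copy back + drop loan */
    }
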
1179 * - the remaining space in the abd (which may not cover the entire
1201 * @off is the offset in @abd
1204 abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
1208 if (abd_is_gang(abd)) {
1211 for (abd_t *cabd = abd_gang_get_offset(abd, &off);
1213 cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
1223 if (abd_is_linear(abd))
1224 pos = (unsigned long)abd_to_buf(abd) + off;
1226 pos = ABD_SCATTER(abd).abd_offset + off;
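
abd_nr_pages_off() (lines 1179-1226) counts how many pages a span touches: gang ABDs sum over the children that cover the range, while linear and scatter ABDs both reduce to the same calculation on a byte position (lines 1223-1226), namely the span's start within its first page plus its size, rounded up to whole pages. A standalone check of the arithmetic; it assumes 4 KiB pages, and the kernel version shifts by PAGE_SHIFT rather than dividing:

    #include <stdio.h>

    #define MY_PAGESIZE 4096UL

    static unsigned long my_nr_pages_off(unsigned long pos, unsigned int size) {
            /* offset into the first page, plus the span, in whole pages */
            return (((pos % MY_PAGESIZE) + size + MY_PAGESIZE - 1) / MY_PAGESIZE);
    }

    int main(void) {
            printf("%lu\n", my_nr_pages_off(100, 6000)); /* ends at 6100: 2 pages */
            printf("%lu\n", my_nr_pages_off(4000, 200)); /* straddles a boundary: 2 */
            printf("%lu\n", my_nr_pages_off(0, 4096));   /* exactly one page: 1 */
            return (0);
    }
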
1275 abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
1278 ASSERT(abd_is_gang(abd));
1280 for (abd_t *cabd = abd_gang_get_offset(abd, &off);
1282 cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
1297 * @off is the offset in @abd
1301 abd_bio_map_off(struct bio *bio, abd_t *abd,
1306 ASSERT3U(io_size, <=, abd->abd_size - off);
1307 if (abd_is_linear(abd))
1308 return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));
1310 ASSERT(!abd_is_linear(abd));
1311 if (abd_is_gang(abd))
1312 return (abd_gang_bio_map_off(bio, abd, io_size, off));
1314 abd_iter_init(&aiter, abd);
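
abd_bio_map_off() (lines 1297-1314) is the dispatcher that attaches ABD data to a block-layer bio: a linear ABD maps its flat buffer in a single bio_map() call, a gang ABD recurses child by child through abd_gang_bio_map_off() (lines 1275-1282, which first skips to the child containing off), and a scatter ABD falls through to an iterator loop that adds one page at a time. A shape-only sketch of that dispatch; the my_* helpers are invented stand-ins with bodies elided, and MY_FLAG_GANG mirrors the gang flag:

    enum { MY_FLAG_GANG = 1 << 5 };

    struct my_bio;      /* opaque stand-in for struct bio */

    /* stand-in helpers (bodies elided); each returns bytes left unmapped */
    unsigned int my_bio_map(struct my_bio *bio, void *buf, unsigned int len);
    unsigned int my_gang_bio_map_off(struct my_bio *bio, struct my_abd *abd,
        unsigned int io_size, size_t off);
    unsigned int my_scatter_bio_map(struct my_bio *bio, struct my_abd *abd,
        unsigned int io_size, size_t off);

    static unsigned int my_bio_map_off(struct my_bio *bio, struct my_abd *abd,
        unsigned int io_size, size_t off) {
            if (abd->flags & MY_FLAG_LINEAR)    /* lines 1307-1308 */
                    return (my_bio_map(bio, (char *)abd->linear_buf + off,
                        io_size));
            if (abd->flags & MY_FLAG_GANG)      /* lines 1311-1312 */
                    return (my_gang_bio_map_off(bio, abd, io_size, off));
            /* scatter: iterate sg entries until the bio fills (line 1314 on) */
            return (my_scatter_bio_map(bio, abd, io_size, off));
    }
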