Lines matching refs: req

157 static void _start_readv_request(struct spdk_reduce_vol_request *req);
158 static void _start_writev_request(struct spdk_reduce_vol_request *req);
371 struct spdk_reduce_vol_request *req;
394 vol->request_mem = calloc(REDUCE_NUM_VOL_REQUESTS, sizeof(*req));
430 req = &vol->request_mem[i];
431 TAILQ_INSERT_HEAD(&vol->free_requests, req, tailq);
432 req->backing_io = (struct spdk_reduce_backing_io *)(vol->buf_backing_io_mem + i *
436 req->decomp_buf_iov = &vol->buf_iov_mem[(2 * i) * vol->backing_io_units_per_chunk];
437 req->comp_buf_iov = &vol->buf_iov_mem[(2 * i + 1) * vol->backing_io_units_per_chunk];
439 rc = _set_buffer(&req->comp_buf, &buffer, buffer_end, vol->params.chunk_size);
441 SPDK_ERRLOG("Failed to set comp buffer for req idx %u, addr %p, start %p, end %p\n", i, buffer,
445 rc = _set_buffer(&req->decomp_buf, &buffer, buffer_end, vol->params.chunk_size);
447 SPDK_ERRLOG("Failed to set decomp buffer for req idx %u, addr %p, start %p, end %p\n", i, buffer,
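The calls above carve each request's comp_buf and decomp_buf out of one preallocated region. Below is a minimal standalone sketch of that carving pattern, assuming a hypothetical set_buffer() helper that hands out chunk_size-sized slices from a [cursor, end) range and fails once the region is exhausted; it is an illustration, not the SPDK implementation.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hand out a 'size'-byte slice from the region and advance the cursor. */
static int
set_buffer(uint8_t **buf, uint8_t **cursor, uint8_t *end, uint64_t size)
{
        if (*cursor + size > end) {
                return -ENOMEM;
        }
        *buf = *cursor;
        *cursor += size;
        return 0;
}

int
main(void)
{
        enum { NUM_REQS = 4, CHUNK_SIZE = 16 * 1024 };
        uint8_t *region = malloc(2 * NUM_REQS * CHUNK_SIZE);
        uint8_t *comp_buf[NUM_REQS], *decomp_buf[NUM_REQS];

        if (region == NULL) {
                return 1;
        }
        uint8_t *cursor = region;
        uint8_t *end = region + 2 * NUM_REQS * CHUNK_SIZE;

        /* One compressed and one decompressed buffer per request, back to back. */
        for (int i = 0; i < NUM_REQS; i++) {
                if (set_buffer(&comp_buf[i], &cursor, end, CHUNK_SIZE) != 0 ||
                    set_buffer(&decomp_buf[i], &cursor, end, CHUNK_SIZE) != 0) {
                        fprintf(stderr, "buffer region exhausted at request %d\n", i);
                        free(region);
                        return 1;
                }
        }
        printf("carved %d comp/decomp buffer pairs from one region\n", NUM_REQS);
        free(region);
        return 0;
}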
1135 _reduce_vol_complete_req(struct spdk_reduce_vol_request *req, int reduce_errno)
1138 struct spdk_reduce_vol *vol = req->vol;
1140 req->cb_fn(req->cb_arg, reduce_errno);
1141 RB_REMOVE(executing_req_tree, &vol->executing_requests, req);
1144 if (next_req->logical_map_index == req->logical_map_index) {
1158 TAILQ_INSERT_HEAD(&vol->free_requests, req, tailq);
1195 struct spdk_reduce_vol_request *req = _req;
1196 struct spdk_reduce_vol *vol = req->vol;
1200 req->reduce_errno = reduce_errno;
1203 assert(req->num_backing_ops > 0);
1204 if (--req->num_backing_ops > 0) {
1208 if (req->reduce_errno != 0) {
1209 _reduce_vol_reset_chunk(vol, req->chunk_map_index);
1210 _reduce_vol_complete_req(req, req->reduce_errno);
1214 old_chunk_map_index = vol->pm_logical_map[req->logical_map_index];
1226 _reduce_persist(vol, req->chunk,
1229 vol->pm_logical_map[req->logical_map_index] = req->chunk_map_index;
1231 _reduce_persist(vol, &vol->pm_logical_map[req->logical_map_index], sizeof(uint64_t));
1233 _reduce_vol_complete_req(req, 0);
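The --req->num_backing_ops check above is a fan-in: one request issues several backing I/Os, the shared callback records any error, and only the final completion advances the request. A simplified, single-threaded sketch of that pattern (hypothetical names, not the SPDK code):

#include <assert.h>
#include <errno.h>
#include <stdio.h>

struct request {
        int num_backing_ops;
        int reduce_errno;
};

static void
backing_op_done(struct request *req, int error)
{
        if (error != 0) {
                req->reduce_errno = error;      /* remember a failure from any backing op */
        }
        assert(req->num_backing_ops > 0);
        if (--req->num_backing_ops > 0) {
                return;                         /* more backing ops still in flight */
        }
        printf("request complete, status %d\n", req->reduce_errno);
}

int
main(void)
{
        struct request req = { .num_backing_ops = 3, .reduce_errno = 0 };

        backing_op_done(&req, 0);
        backing_op_done(&req, -EIO);    /* one backing io unit failed */
        backing_op_done(&req, 0);       /* last completion reports the saved error */
        return 0;
}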
1237 _reduce_vol_req_get_backing_io(struct spdk_reduce_vol_request *req, uint32_t index)
1239 struct spdk_reduce_backing_dev *backing_dev = req->vol->backing_dev;
1242 backing_io = (struct spdk_reduce_backing_io *)((uint8_t *)req->backing_io +
1255 _issue_backing_ops_without_merge(struct spdk_reduce_vol_request *req, struct spdk_reduce_vol *vol,
1263 if (req->chunk_is_compressed) {
1264 iov = req->comp_buf_iov;
1265 buf = req->comp_buf;
1267 iov = req->decomp_buf_iov;
1268 buf = req->decomp_buf;
1271 req->num_backing_ops = req->num_io_units;
1272 req->backing_cb_args.cb_fn = next_fn;
1273 req->backing_cb_args.cb_arg = req;
1274 for (i = 0; i < req->num_io_units; i++) {
1275 backing_io = _reduce_vol_req_get_backing_io(req, i);
1281 backing_io->lba = req->chunk->io_unit_index[i] * vol->backing_lba_per_io_unit;
1283 backing_io->backing_cb_args = &req->backing_cb_args;
1294 _issue_backing_ops(struct spdk_reduce_vol_request *req, struct spdk_reduce_vol *vol,
1312 _issue_backing_ops_without_merge(req, vol, next_fn, is_write);
1316 if (req->chunk_is_compressed) {
1317 iov = req->comp_buf_iov;
1318 buf = req->comp_buf;
1320 iov = req->decomp_buf_iov;
1321 buf = req->decomp_buf;
1324 for (i = 0; i < req->num_io_units; i++) {
1326 merged_io_desc[merged_io_idx].io_unit_index = req->chunk->io_unit_index[i];
1331 if (i + 1 == req->num_io_units) {
1335 if (req->chunk->io_unit_index[i] + 1 == req->chunk->io_unit_index[i + 1]) {
1344 req->num_backing_ops = num_io;
1345 req->backing_cb_args.cb_fn = next_fn;
1346 req->backing_cb_args.cb_arg = req;
1348 backing_io = _reduce_vol_req_get_backing_io(req, i);
1356 backing_io->backing_cb_args = &req->backing_cb_args;
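The loop over req->chunk->io_unit_index in _issue_backing_ops coalesces adjacent backing io units into fewer, larger backing I/Os. A standalone sketch of the same merge idea, with hypothetical types:

#include <stdint.h>
#include <stdio.h>

struct merged_io {
        uint64_t io_unit_index;
        uint32_t num_io_units;
};

/* Collapse runs of consecutive io unit indexes into (start, count) descriptors. */
static uint32_t
merge_io_units(const uint64_t *idx, uint32_t n, struct merged_io *out)
{
        uint32_t num_io = 0;

        for (uint32_t i = 0; i < n; i++) {
                if (i == 0 || idx[i] != idx[i - 1] + 1) {
                        out[num_io].io_unit_index = idx[i];
                        out[num_io].num_io_units = 1;
                        num_io++;
                } else {
                        out[num_io - 1].num_io_units++;   /* extend the previous run */
                }
        }
        return num_io;
}

int
main(void)
{
        uint64_t idx[] = { 10, 11, 12, 40, 41, 77 };
        struct merged_io out[6];
        uint32_t n = merge_io_units(idx, 6, out);

        for (uint32_t i = 0; i < n; i++) {
                printf("backing io: start %llu, units %u\n",
                       (unsigned long long)out[i].io_unit_index, out[i].num_io_units);
        }
        return 0;       /* prints three merged I/Os: 10x3, 40x2, 77x1 */
}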
1370 _reduce_vol_write_chunk(struct spdk_reduce_vol_request *req, reduce_request_fn next_fn,
1373 struct spdk_reduce_vol *vol = req->vol;
1382 req->chunk_map_index = free_index;
1384 req->chunk_map_index = spdk_bit_array_find_first_clear(vol->allocated_chunk_maps,
1386 vol->find_chunk_offset = req->chunk_map_index + 1;
1392 assert(req->chunk_map_index != REDUCE_EMPTY_MAP_ENTRY);
1393 spdk_bit_array_set(vol->allocated_chunk_maps, req->chunk_map_index);
1395 req->chunk = _reduce_vol_get_chunk_map(vol, req->chunk_map_index);
1396 req->num_io_units = spdk_divide_round_up(compressed_size,
1398 req->chunk_is_compressed = (req->num_io_units != vol->backing_io_units_per_chunk);
1399 req->chunk->compressed_size =
1400 req->chunk_is_compressed ? compressed_size : vol->params.chunk_size;
1403 if (req->chunk_is_compressed == false) {
1404 chunk_offset = req->offset % vol->logical_blocks_per_chunk;
1405 buf = req->decomp_buf;
1409 if (req->rmw == false && chunk_offset) {
1415 for (j = 0; j < req->iovcnt; j++) {
1416 memcpy(buf, req->iov[j].iov_base, req->iov[j].iov_len);
1417 buf += req->iov[j].iov_len;
1418 total_len += req->iov[j].iov_len;
1424 if (req->rmw == false && remainder) {
1430 for (i = 0; i < req->num_io_units; i++) {
1433 req->chunk->io_unit_index[i] = free_index;
1435 req->chunk->io_unit_index[i] = spdk_bit_array_find_first_clear(vol->allocated_backing_io_units,
1437 vol->find_block_offset = req->chunk->io_unit_index[i] + 1;
1442 assert(req->chunk->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
1443 spdk_bit_array_set(vol->allocated_backing_io_units, req->chunk->io_unit_index[i]);
1447 _issue_backing_ops(req, vol, next_fn, true /* write */);
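In _reduce_vol_write_chunk, the new chunk map entry and each backing io unit are claimed from bit arrays, scanning from a rotating find_chunk_offset/find_block_offset so allocations spread across the backing device. Below is a simplified sketch using a plain bool bitmap instead of spdk_bit_array; the wrap-around second pass is an assumption about behavior not shown in the excerpt.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_UNITS   16
#define EMPTY_ENTRY UINT32_MAX

static bool g_allocated[NUM_UNITS];

/* Find the first clear slot at or after *offset (falling back to the start),
 * mark it allocated, and remember where to resume the next search. */
static uint32_t
claim_unit(uint32_t *offset)
{
        for (int pass = 0; pass < 2; pass++) {
                uint32_t start = (pass == 0) ? *offset : 0;

                for (uint32_t i = start; i < NUM_UNITS; i++) {
                        if (!g_allocated[i]) {
                                g_allocated[i] = true;
                                *offset = i + 1;
                                return i;
                        }
                }
        }
        return EMPTY_ENTRY;
}

int
main(void)
{
        uint32_t find_offset = 10;

        for (int i = 0; i < 4; i++) {
                printf("claimed io unit %u\n", claim_unit(&find_offset));
        }
        return 0;       /* claims units 10, 11, 12, 13 */
}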
1453 struct spdk_reduce_vol_request *req = _req;
1462 req->backing_cb_args.output_size = req->vol->params.chunk_size;
1465 _reduce_vol_write_chunk(req, _write_write_done, req->backing_cb_args.output_size);
1469 _reduce_vol_compress_chunk(struct spdk_reduce_vol_request *req, reduce_request_fn next_fn)
1471 struct spdk_reduce_vol *vol = req->vol;
1473 req->backing_cb_args.cb_fn = next_fn;
1474 req->backing_cb_args.cb_arg = req;
1475 req->comp_buf_iov[0].iov_base = req->comp_buf;
1476 req->comp_buf_iov[0].iov_len = vol->params.chunk_size;
1478 req->decomp_iov, req->decomp_iovcnt, req->comp_buf_iov, 1,
1479 &req->backing_cb_args);
1483 _reduce_vol_decompress_chunk_scratch(struct spdk_reduce_vol_request *req, reduce_request_fn next_fn)
1485 struct spdk_reduce_vol *vol = req->vol;
1487 req->backing_cb_args.cb_fn = next_fn;
1488 req->backing_cb_args.cb_arg = req;
1489 req->comp_buf_iov[0].iov_base = req->comp_buf;
1490 req->comp_buf_iov[0].iov_len = req->chunk->compressed_size;
1491 req->decomp_buf_iov[0].iov_base = req->decomp_buf;
1492 req->decomp_buf_iov[0].iov_len = vol->params.chunk_size;
1494 req->comp_buf_iov, 1, req->decomp_buf_iov, 1,
1495 &req->backing_cb_args);
1499 _reduce_vol_decompress_chunk(struct spdk_reduce_vol_request *req, reduce_request_fn next_fn)
1501 struct spdk_reduce_vol *vol = req->vol;
1507 req->decomp_iovcnt = 0;
1508 chunk_offset = req->offset % vol->logical_blocks_per_chunk;
1515 iov_len = req->iov[0].iov_len;
1516 req->copy_after_decompress = !vol->backing_dev->sgl_out && (req->iovcnt > 1 ||
1517 req->iov[0].iov_len < vol->params.chunk_size ||
1518 _addr_crosses_huge_page(req->iov[0].iov_base, &iov_len));
1519 if (req->copy_after_decompress) {
1520 req->decomp_iov[0].iov_base = req->decomp_buf;
1521 req->decomp_iov[0].iov_len = vol->params.chunk_size;
1522 req->decomp_iovcnt = 1;
1528 req->decomp_iov[0].iov_base = req->decomp_buf;
1529 req->decomp_iov[0].iov_len = chunk_offset * vol->params.logical_block_size;
1530 ttl_len += req->decomp_iov[0].iov_len;
1531 req->decomp_iovcnt = 1;
1535 for (i = 0; i < req->iovcnt; i++) {
1536 req->decomp_iov[i + req->decomp_iovcnt].iov_base = req->iov[i].iov_base;
1537 req->decomp_iov[i + req->decomp_iovcnt].iov_len = req->iov[i].iov_len;
1538 ttl_len += req->decomp_iov[i + req->decomp_iovcnt].iov_len;
1540 req->decomp_iovcnt += req->iovcnt;
1545 req->decomp_iov[req->decomp_iovcnt].iov_base = req->decomp_buf + ttl_len;
1546 req->decomp_iov[req->decomp_iovcnt].iov_len = remainder;
1547 ttl_len += req->decomp_iov[req->decomp_iovcnt].iov_len;
1548 req->decomp_iovcnt++;
1553 assert(!req->copy_after_decompress || (req->copy_after_decompress && req->decomp_iovcnt == 1));
1554 req->backing_cb_args.cb_fn = next_fn;
1555 req->backing_cb_args.cb_arg = req;
1556 req->comp_buf_iov[0].iov_base = req->comp_buf;
1557 req->comp_buf_iov[0].iov_len = req->chunk->compressed_size;
1559 req->comp_buf_iov, 1, req->decomp_iov, req->decomp_iovcnt,
1560 &req->backing_cb_args);
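_reduce_vol_decompress_chunk builds a scatter list that decompresses directly into the caller's buffers: an optional head padding iovec pointing into the scratch decomp_buf, then the user iovecs, then an optional tail padding iovec. A hypothetical standalone version of that construction:

#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

/* Build the decompress scatter list: head padding, user buffers, tail padding. */
static int
build_decomp_iov(struct iovec *out, char *scratch, uint64_t chunk_size,
                 uint64_t head_len, const struct iovec *user, int user_cnt)
{
        int cnt = 0;
        uint64_t ttl_len = 0;

        if (head_len != 0) {                    /* chunk data before the caller's range */
                out[cnt].iov_base = scratch;
                out[cnt].iov_len = head_len;
                ttl_len += head_len;
                cnt++;
        }
        for (int i = 0; i < user_cnt; i++) {    /* caller's buffers, used in place */
                out[cnt + i] = user[i];
                ttl_len += user[i].iov_len;
        }
        cnt += user_cnt;
        if (ttl_len < chunk_size) {             /* chunk data after the caller's range */
                out[cnt].iov_base = scratch + ttl_len;
                out[cnt].iov_len = chunk_size - ttl_len;
                cnt++;
        }
        return cnt;
}

int
main(void)
{
        char scratch[4096];
        char user_buf[1024];
        struct iovec user = { .iov_base = user_buf, .iov_len = sizeof(user_buf) };
        struct iovec out[3];
        int cnt = build_decomp_iov(out, scratch, sizeof(scratch), 512, &user, 1);

        printf("decompress into %d iovecs\n", cnt);     /* head + user + tail = 3 */
        return 0;
}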
1564 _prepare_compress_chunk_copy_user_buffers(struct spdk_reduce_vol_request *req, bool zero_paddings)
1566 struct spdk_reduce_vol *vol = req->vol;
1573 req->decomp_iov[0].iov_base = req->decomp_buf;
1574 req->decomp_iov[0].iov_len = vol->params.chunk_size;
1575 req->decomp_iovcnt = 1;
1576 copy_offset = req->decomp_iov[0].iov_base;
1577 chunk_offset = req->offset % vol->logical_blocks_per_chunk;
1589 for (i = 0; i < req->iovcnt; i++) {
1590 memcpy(copy_offset, req->iov[i].iov_base, req->iov[i].iov_len);
1591 copy_offset += req->iov[i].iov_len;
1592 ttl_len += req->iov[i].iov_len;
1604 assert(ttl_len == req->vol->params.chunk_size);
1611 _prepare_compress_chunk(struct spdk_reduce_vol_request *req, bool zero_paddings)
1613 struct spdk_reduce_vol *vol = req->vol;
1614 char *padding_buffer = zero_paddings ? g_zero_buf : req->decomp_buf;
1626 iov_len = req->iov[0].iov_len;
1627 if (!vol->backing_dev->sgl_in && (req->iovcnt > 1 ||
1628 req->iov[0].iov_len < vol->params.chunk_size ||
1629 _addr_crosses_huge_page(req->iov[0].iov_base, &iov_len))) {
1630 _prepare_compress_chunk_copy_user_buffers(req, zero_paddings);
1634 req->decomp_iovcnt = 0;
1635 chunk_offset = req->offset % vol->logical_blocks_per_chunk;
1639 req->decomp_iov[0].iov_base = padding_buffer;
1640 req->decomp_iov[0].iov_len = ttl_len;
1641 req->decomp_iovcnt = 1;
1645 for (i = 0; i < req->iovcnt; i++) {
1646 req->decomp_iov[i + req->decomp_iovcnt].iov_base = req->iov[i].iov_base;
1647 req->decomp_iov[i + req->decomp_iovcnt].iov_len = req->iov[i].iov_len;
1648 ttl_len += req->iov[i].iov_len;
1650 req->decomp_iovcnt += req->iovcnt;
1654 req->decomp_iov[req->decomp_iovcnt].iov_base = padding_buffer + ttl_len;
1655 req->decomp_iov[req->decomp_iovcnt].iov_len = remainder;
1656 req->decomp_iovcnt++;
1659 assert(ttl_len == req->vol->params.chunk_size);
1665 struct spdk_reduce_vol_request *req = _req;
1669 _reduce_vol_complete_req(req, reduce_errno);
1676 if (req->backing_cb_args.output_size != req->vol->params.chunk_size) {
1677 _reduce_vol_complete_req(req, -EIO);
1681 _prepare_compress_chunk(req, false);
1682 _reduce_vol_compress_chunk(req, _write_compress_done);
1688 struct spdk_reduce_vol_request *req = _req;
1691 req->reduce_errno = reduce_errno;
1694 assert(req->num_backing_ops > 0);
1695 if (--req->num_backing_ops > 0) {
1699 if (req->reduce_errno != 0) {
1700 _reduce_vol_complete_req(req, req->reduce_errno);
1704 if (req->chunk_is_compressed) {
1705 _reduce_vol_decompress_chunk_scratch(req, _write_decompress_done);
1707 req->backing_cb_args.output_size = req->chunk->compressed_size;
1709 _write_decompress_done(req, 0);
1716 struct spdk_reduce_vol_request *req = _req;
1717 struct spdk_reduce_vol *vol = req->vol;
1721 _reduce_vol_complete_req(req, reduce_errno);
1728 if (req->backing_cb_args.output_size != vol->params.chunk_size) {
1729 _reduce_vol_complete_req(req, -EIO);
1733 if (req->copy_after_decompress) {
1734 uint64_t chunk_offset = req->offset % vol->logical_blocks_per_chunk;
1735 char *decomp_buffer = (char *)req->decomp_buf + chunk_offset * vol->params.logical_block_size;
1738 for (i = 0; i < req->iovcnt; i++) {
1739 memcpy(req->iov[i].iov_base, decomp_buffer, req->iov[i].iov_len);
1740 decomp_buffer += req->iov[i].iov_len;
1741 assert(decomp_buffer <= (char *)req->decomp_buf + vol->params.chunk_size);
1745 _reduce_vol_complete_req(req, 0);
1751 struct spdk_reduce_vol_request *req = _req;
1754 req->reduce_errno = reduce_errno;
1757 assert(req->num_backing_ops > 0);
1758 if (--req->num_backing_ops > 0) {
1762 if (req->reduce_errno != 0) {
1763 _reduce_vol_complete_req(req, req->reduce_errno);
1767 if (req->chunk_is_compressed) {
1768 _reduce_vol_decompress_chunk(req, _read_decompress_done);
1773 * from req->decomp_buf.
1775 req->copy_after_decompress = true;
1776 req->backing_cb_args.output_size = req->chunk->compressed_size;
1778 _read_decompress_done(req, 0);
1783 _reduce_vol_read_chunk(struct spdk_reduce_vol_request *req, reduce_request_fn next_fn)
1785 struct spdk_reduce_vol *vol = req->vol;
1787 req->chunk_map_index = vol->pm_logical_map[req->logical_map_index];
1788 assert(req->chunk_map_index != REDUCE_EMPTY_MAP_ENTRY);
1790 req->chunk = _reduce_vol_get_chunk_map(vol, req->chunk_map_index);
1791 req->num_io_units = spdk_divide_round_up(req->chunk->compressed_size,
1793 req->chunk_is_compressed = (req->num_io_units != vol->backing_io_units_per_chunk);
1795 _issue_backing_ops(req, vol, next_fn, false /* read */);
1819 struct spdk_reduce_vol_request req;
1821 req.logical_map_index = logical_map_index;
1823 return (NULL != RB_FIND(executing_req_tree, &vol->executing_requests, &req));
1827 _start_readv_request(struct spdk_reduce_vol_request *req)
1829 RB_INSERT(executing_req_tree, &req->vol->executing_requests, req);
1830 _reduce_vol_read_chunk(req, _read_read_done);
1838 struct spdk_reduce_vol_request *req;
1874 req = TAILQ_FIRST(&vol->free_requests);
1875 if (req == NULL) {
1880 TAILQ_REMOVE(&vol->free_requests, req, tailq);
1881 req->type = REDUCE_IO_READV;
1882 req->vol = vol;
1883 req->iov = iov;
1884 req->iovcnt = iovcnt;
1885 req->offset = offset;
1886 req->logical_map_index = logical_map_index;
1887 req->length = length;
1888 req->copy_after_decompress = false;
1889 req->cb_fn = cb_fn;
1890 req->cb_arg = cb_arg;
1891 req->reduce_errno = 0;
1894 _start_readv_request(req);
1896 TAILQ_INSERT_TAIL(&vol->queued_requests, req, tailq);
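spdk_reduce_vol_readv pops a request from free_requests, fills in the I/O parameters, and either starts it or appends it to queued_requests when an overlapping request is already executing. A reduced sketch of that submit path using <sys/queue.h> TAILQ, with the overlap test collapsed to a flag and the out-of-requests case only printed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/queue.h>

struct request {
        uint64_t logical_map_index;
        TAILQ_ENTRY(request) tailq;
};

TAILQ_HEAD(request_list, request);

static void
submit(struct request_list *free_reqs, struct request_list *queued,
       uint64_t logical_map_index, bool overlapped)
{
        struct request *req = TAILQ_FIRST(free_reqs);

        if (req == NULL) {
                printf("no free requests available\n");
                return;
        }
        TAILQ_REMOVE(free_reqs, req, tailq);
        req->logical_map_index = logical_map_index;

        if (!overlapped) {
                printf("start request on chunk %llu\n",
                       (unsigned long long)logical_map_index);
        } else {
                /* Another request already touches this chunk: run later. */
                TAILQ_INSERT_TAIL(queued, req, tailq);
                printf("queued behind in-flight request on chunk %llu\n",
                       (unsigned long long)logical_map_index);
        }
}

int
main(void)
{
        struct request_list free_reqs = TAILQ_HEAD_INITIALIZER(free_reqs);
        struct request_list queued = TAILQ_HEAD_INITIALIZER(queued);
        struct request pool[2];

        for (int i = 0; i < 2; i++) {
                TAILQ_INSERT_HEAD(&free_reqs, &pool[i], tailq);
        }
        submit(&free_reqs, &queued, 7, false);
        submit(&free_reqs, &queued, 7, true);
        return 0;
}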
1901 _start_writev_request(struct spdk_reduce_vol_request *req)
1903 struct spdk_reduce_vol *vol = req->vol;
1905 RB_INSERT(executing_req_tree, &req->vol->executing_requests, req);
1906 if (vol->pm_logical_map[req->logical_map_index] != REDUCE_EMPTY_MAP_ENTRY) {
1907 if ((req->length * vol->params.logical_block_size) < vol->params.chunk_size) {
1911 req->rmw = true;
1912 _reduce_vol_read_chunk(req, _write_read_done);
1917 req->rmw = false;
1919 _prepare_compress_chunk(req, true);
1920 _reduce_vol_compress_chunk(req, _write_compress_done);
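_start_writev_request decides between read-modify-write and a direct overwrite: if the chunk is already mapped and the write covers less than a full chunk, the existing chunk must be read back (and decompressed) first; a full-chunk write goes straight to compression. A small sketch of that decision, with hypothetical parameter names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
needs_read_modify_write(bool chunk_mapped, uint64_t write_len_blocks,
                        uint64_t logical_block_size, uint64_t chunk_size)
{
        /* Partial write into an existing chunk: must merge with the old data. */
        return chunk_mapped &&
               (write_len_blocks * logical_block_size) < chunk_size;
}

int
main(void)
{
        /* 16 KiB chunks of 512-byte blocks: an 8-block (4 KiB) write to a mapped
         * chunk needs RMW, a 32-block (16 KiB) full-chunk write does not. */
        printf("partial write to mapped chunk: rmw=%d\n",
               needs_read_modify_write(true, 8, 512, 16384));
        printf("full-chunk write: rmw=%d\n",
               needs_read_modify_write(true, 32, 512, 16384));
        return 0;
}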
1928 struct spdk_reduce_vol_request *req;
1950 req = TAILQ_FIRST(&vol->free_requests);
1951 if (req == NULL) {
1956 TAILQ_REMOVE(&vol->free_requests, req, tailq);
1957 req->type = REDUCE_IO_WRITEV;
1958 req->vol = vol;
1959 req->iov = iov;
1960 req->iovcnt = iovcnt;
1961 req->offset = offset;
1962 req->logical_map_index = logical_map_index;
1963 req->length = length;
1964 req->copy_after_decompress = false;
1965 req->cb_fn = cb_fn;
1966 req->cb_arg = cb_arg;
1967 req->reduce_errno = 0;
1970 _start_writev_request(req);
1972 TAILQ_INSERT_TAIL(&vol->queued_requests, req, tailq);
1979 struct spdk_reduce_vol_request *req = ctx;
1980 struct spdk_reduce_vol *vol = req->vol;
1983 RB_INSERT(executing_req_tree, &req->vol->executing_requests, req);
1985 chunk_map_index = vol->pm_logical_map[req->logical_map_index];
1988 vol->pm_logical_map[req->logical_map_index] = REDUCE_EMPTY_MAP_ENTRY;
1989 _reduce_persist(vol, &vol->pm_logical_map[req->logical_map_index], sizeof(uint64_t));
1991 _reduce_vol_complete_req(req, 0);
1999 struct spdk_reduce_vol_request *req;
2019 req = TAILQ_FIRST(&vol->free_requests);
2020 if (req == NULL) {
2025 TAILQ_REMOVE(&vol->free_requests, req, tailq);
2026 req->type = REDUCE_IO_UNMAP;
2027 req->vol = vol;
2028 req->iov = NULL;
2029 req->iovcnt = 0;
2030 req->offset = offset;
2031 req->logical_map_index = logical_map_index;
2032 req->length = length;
2033 req->copy_after_decompress = false;
2034 req->cb_fn = cb_fn;
2035 req->cb_arg = cb_arg;
2036 req->reduce_errno = 0;
2039 _start_unmap_request_full_chunk(req);
2041 TAILQ_INSERT_TAIL(&vol->queued_requests, req, tailq);