Lines Matching full-text terms: trim, data, valid

1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * greatly re-simplify the vnode_pager.
156 object = vp->v_object;
161 if (vn_getsize_locked(vp, &size, td->td_ucred) != 0)
167 object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
175 last = refcount_release(&object->ref_count);
180 VNASSERT(vp->v_object != NULL, vp, ("%s: NULL object", __func__));
212 obj = vp->v_object;
213 if (obj == NULL || obj->handle != vp)
217 MPASS(obj->type == OBJT_VNODE);
219 if (obj->ref_count == 0) {
220 KASSERT((obj->flags & OBJ_DEAD) == 0,
232 BO_LOCK(&vp->v_bufobj);
233 vp->v_bufobj.bo_flag |= BO_DEAD;
234 BO_UNLOCK(&vp->v_bufobj);
240 * Woe to the process that tries to page now :-).
245 KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
267 VNPASS(vp->v_usecount > 0, vp);
269 object = vp->v_object;
278 object->un_pager.vnp.vnp_size = size;
279 object->un_pager.vnp.writemappings = 0;
280 object->domain.dr_policy = vnode_domainset;
281 object->handle = handle;
282 if ((vp->v_vflag & VV_VMSIZEVNLOCK) != 0) {
288 if (vp->v_object != NULL) {
294 KASSERT(object->ref_count == 1,
295 ("leaked ref %p %d", object, object->ref_count));
296 object->type = OBJT_DEAD;
297 refcount_init(&object->ref_count, 0);
302 vp->v_object = object;
308 if ((object->flags & OBJ_COLORED) == 0) {
327 vp = object->handle;
333 refs = object->ref_count;
335 object->handle = NULL;
336 object->type = OBJT_DEAD;
338 if (object->un_pager.vnp.writemappings > 0) {
339 object->un_pager.vnp.writemappings = 0;
340 VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
342 __func__, vp, vp->v_writecount);
344 vp->v_object = NULL;
349 * following object->handle. Clear all text references now.
354 if (vp->v_writecount < 0)
355 vp->v_writecount = 0;
367 struct vnode *vp = object->handle;
387 if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
390 bsize = vp->v_mount->mnt_stat.f_iosize;
404 if (bn == -1)
407 poff = pindex - (reqblock * pagesperblock);
420 roundup2(object->size, pagesperblock),
423 (uintmax_t )object->size));
425 *after += pagesperblock - (poff + 1);
426 if (pindex + *after >= object->size)
427 *after = object->size - 1 - pindex;
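
The lines above (387-427) come from the pager's residency (haspage) check, which converts a page index into a filesystem block number plus a count of trailing resident pages. A minimal userland sketch of that arithmetic, using illustrative assumptions only (4 KB pages, 32 KB filesystem blocks, a 24-page object; none of these values come from the source):

#include <stdio.h>

#define PAGE_SIZE	4096		/* assumed page size */
#define F_IOSIZE	32768		/* hypothetical mnt_stat.f_iosize */

int
main(void)
{
	long pagesperblock = F_IOSIZE / PAGE_SIZE;	/* 8 pages per fs block */
	long pindex = 21;				/* requested page index */
	long objsize = 24;				/* object->size in pages */
	long reqblock = pindex / pagesperblock;		/* fs block holding the page */
	long poff = pindex - reqblock * pagesperblock;	/* page's offset inside that block */
	long after = pagesperblock - (poff + 1);	/* pages left in the block after it */

	if (pindex + after >= objsize)			/* do not run past the object */
		after = objsize - 1 - pindex;
	printf("reqblock=%ld poff=%ld after=%ld\n", reqblock, poff, after);
	return (0);
}
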
442 * Internal routine clearing partial-page content
451 size = end - base;
454 * Clear out partial-page garbage in case
460 * Update the valid bits to reflect the blocks
461 * that have been zeroed. Some of these valid
476 * Clear out partial-page dirty bits.
479 * valid bits. This would prevent
483 vm_page_clear_dirty(m, base, end - base);
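
The comments at lines 442-483 describe clearing partial-page content and the matching valid/dirty bits. A simplified userland model of that per-DEV_BSIZE bit arithmetic, assuming 4 KB pages and 512-byte chunks; this is an approximation for illustration, not the kernel's vm_page_bits() implementation:

#include <stdint.h>
#include <stdio.h>

#define DEV_BSIZE	512		/* bit granularity of the valid/dirty maps */

/*
 * Bits covering every DEV_BSIZE chunk that overlaps [base, base + size)
 * within one 4 KB page (8 chunks, so 8 bits).
 */
static uint8_t
page_bits(int base, int size)
{
	int first = base / DEV_BSIZE;
	int last = (base + size + DEV_BSIZE - 1) / DEV_BSIZE;

	return ((uint8_t)(((1u << last) - 1) & ~((1u << first) - 1)));
}

int
main(void)
{
	int base = 1536, end = 4096;	/* hypothetical partial-page range */

	/* Chunks 3..7 were zeroed, so bits 0xf8 may now be marked valid. */
	printf("bits for [%d,%d): 0x%02x\n", base, end,
	    (unsigned)page_bits(base, end - base));
	return (0);
}
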
503 if ((object = vp->v_object) == NULL)
509 mp = vp->v_mount;
510 if (mp != NULL && (mp->mnt_kern_flag & MNTK_VMSETSIZE_BUG) == 0)
516 if (object->type == OBJT_DEAD) {
520 KASSERT(object->type == OBJT_VNODE,
521 ("not vnode-backed object %p", object));
522 if (nsize == object->un_pager.vnp.vnp_size) {
530 if (nsize < object->un_pager.vnp.vnp_size) {
534 if (nobjsize < object->size)
535 vm_object_page_remove(object, nobjsize, object->size,
542 * completely invalid page and mark it partially valid
557 object->un_pager.vnp.vnp_size = nsize;
559 atomic_store_64(&object->un_pager.vnp.vnp_size, nsize);
561 object->size = nobjsize;
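
Lines 503-561 are from the setsize path: the exact byte length is stored in vnp_size while object->size holds the page count rounded up, and pages wholly past the new end are removed. A minimal sketch of that rounding, using illustrative values:

#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_MASK	(PAGE_SIZE - 1)
#define OFF_TO_IDX(x)	((x) >> 12)	/* byte offset -> page index, 4 KB pages */

int
main(void)
{
	long long nsize = 10000;	/* new file size in bytes (illustrative) */
	long long nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);

	/* 10000 bytes round up to 3 pages; pages at index >= 3 are removed. */
	printf("vnp_size=%lld bytes, object->size=%lld pages\n", nsize, nobjsize);
	return (0);
}
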
568 * operation. Partial-page area not aligned to page boundaries will be zeroed
582 object = vp->v_object;
636 return -1;
638 bsize = vp->v_mount->mnt_stat.f_iosize;
644 if (*rtaddress != -1)
649 *run -= voffset / PAGE_SIZE;
679 vp = object->handle;
683 bsize = vp->v_mount->mnt_stat.f_iosize;
693 if (m->valid & bits)
696 address = IDX_TO_OFF(m->pindex) + i * bsize;
697 if (address >= object->un_pager.vnp.vnp_size) {
698 fileaddr = -1;
704 if (fileaddr != -1) {
708 bp->b_iocmd = BIO_READ;
709 bp->b_iodone = vnode_pager_input_bdone;
710 KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
711 KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
712 bp->b_rcred = crhold(curthread->td_ucred);
713 bp->b_wcred = crhold(curthread->td_ucred);
714 bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
715 bp->b_blkno = fileaddr;
717 bp->b_vp = vp;
718 bp->b_bcount = bsize;
719 bp->b_bufsize = bsize;
720 (void)runningbufclaim(bp, bp->b_bufsize);
723 bp->b_iooffset = dbtob(bp->b_blkno);
728 if ((bp->b_ioflags & BIO_ERROR) != 0) {
729 KASSERT(bp->b_error != 0,
731 error = bp->b_error;
737 bp->b_vp = NULL;
744 KASSERT((m->dirty & bits) == 0,
746 vm_page_bits_set(m, &m->valid, bits);
774 if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
778 if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
779 size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
780 vp = object->handle;
793 auio.uio_offset = IDX_TO_OFF(m->pindex);
799 error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
801 int count = size - auio.uio_resid;
807 PAGE_SIZE - count);
813 KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
839 vp = object->handle;
853 vp = object->handle;
862 * local filesystems, where partially valid pages can only occur at
869 return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
870 ap->a_rbehind, ap->a_rahead, NULL, NULL));
878 error = vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
879 ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg);
880 if (error != 0 && ap->a_iodone != NULL)
881 ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
904 KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
910 object = vp->v_object;
911 foff = IDX_TO_OFF(m[0]->pindex);
912 bsize = vp->v_mount->mnt_stat.f_iosize;
915 KASSERT(foff < object->un_pager.vnp.vnp_size,
921 * The last page has valid blocks. Invalid part can only
922 * exist at the end of file, and the page is made fully valid
925 if (!vm_page_none_valid(m[count - 1]) && --count == 0) {
932 MPASS((bp->b_flags & B_MAXPHYS) != 0);
939 error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
977 if (bp->b_blkno == -1) {
983 KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
990 blkno0 = bp->b_blkno;
992 bp->b_blkno += (foff % bsize) / DEV_BSIZE;
999 after += pagesperblock - (poff + 1);
1000 if (m[0]->pindex + after >= object->size)
1001 after = object->size - 1 - m[0]->pindex;
1004 after -= count - 1;
1006 /* Trim requested rbehind/rahead to possible values. */
1010 rbehind = min(rbehind, m[0]->pindex);
1012 rahead = min(rahead, object->size - m[count - 1]->pindex);
1014 * Check that total amount of pages fit into buf. Trim rbehind and
1018 int trim, sum;
1020 trim = rbehind + rahead + count - atop(maxphys) + 1;
1023 /* Roundup rbehind trim to block size. */
1024 rbehind -= roundup(trim * rbehind / sum, pagesperblock);
1028 rbehind -= trim * rbehind / sum;
1029 rahead -= trim * rahead / sum;
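
Lines 1006-1029 trim the requested read-behind/read-ahead so the whole run fits into a buffer of atop(maxphys) pages, splitting the overshoot between the two sides in proportion to their size. A standalone sketch of that proportional trim (the block-size roundup branch at line 1024 is omitted), assuming 4 KB pages and a 1 MB maxphys:

#include <stdio.h>

#define MAXPHYS_PAGES	256	/* atop(maxphys) for a hypothetical 1 MB maxphys */

int
main(void)
{
	int count = 32, rbehind = 200, rahead = 120;

	if (rbehind + rahead + count > MAXPHYS_PAGES) {
		int trim = rbehind + rahead + count - MAXPHYS_PAGES + 1;
		int sum = rbehind + rahead;

		/* Shrink each side in proportion to its share of the run. */
		rbehind -= trim * rbehind / sum;
		rahead -= trim * rahead / sum;
	}
	printf("count=%d rbehind=%d rahead=%d total=%d (limit %d)\n",
	    count, rbehind, rahead, count + rbehind + rahead, MAXPHYS_PAGES);
	return (0);
}
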
1036 * Fill in the bp->b_pages[] array with requested and optional
1042 i = bp->b_npages = 0;
1048 startpindex = m[0]->pindex - rbehind;
1050 p->pindex >= startpindex)
1051 startpindex = p->pindex + 1;
1054 for (tpindex = m[0]->pindex - 1;
1055 tpindex >= startpindex && tpindex < m[0]->pindex;
1056 tpindex--, i++) {
1061 bp->b_pages[j] = bp->b_pages[j +
1062 tpindex + 1 - startpindex];
1065 bp->b_pages[tpindex - startpindex] = p;
1068 bp->b_pgbefore = i;
1069 bp->b_npages += i;
1070 bp->b_blkno -= IDX_TO_OFF(i) / DEV_BSIZE;
1072 bp->b_pgbefore = 0;
1076 bp->b_pages[i] = m[j];
1077 bp->b_npages += count;
1085 endpindex = m[count - 1]->pindex + rahead + 1;
1086 if ((p = TAILQ_NEXT(m[count - 1], listq)) != NULL &&
1087 p->pindex < endpindex)
1088 endpindex = p->pindex;
1089 if (endpindex > object->size)
1090 endpindex = object->size;
1092 for (tpindex = m[count - 1]->pindex + 1;
1097 bp->b_pages[i] = p;
1100 bp->b_pgafter = i - bp->b_npages;
1101 bp->b_npages = i;
1103 bp->b_pgafter = 0;
1110 *a_rbehind = bp->b_pgbefore;
1112 *a_rahead = bp->b_pgafter;
1115 KASSERT(bp->b_npages <= atop(maxphys),
1117 for (int j = 1, prev = 0; j < bp->b_npages; j++) {
1118 if (bp->b_pages[j] == bogus_page)
1120 KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex ==
1121 j - prev, ("%s: pages array not consecutive, bp %p",
1132 foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
1133 bytecount = bp->b_npages << PAGE_SHIFT;
1134 if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
1135 bytecount = object->un_pager.vnp.vnp_size - foff;
1136 secmask = bo->bo_bsize - 1;
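
The secmask at line 1136 is the usual power-of-two rounding mask: with bo_bsize a power of two, (x + secmask) & ~secmask rounds a transfer length up to a whole number of device sectors. A minimal sketch, assuming a 512-byte sector size:

#include <stdio.h>

int
main(void)
{
	unsigned long bo_bsize = 512;		/* hypothetical device sector size */
	unsigned long secmask = bo_bsize - 1;
	unsigned long bytecount = 10000;	/* length clamped to the file size */

	/* Round up to whole sectors: 10000 -> 10240. */
	bytecount = (bytecount + secmask) & ~secmask;
	printf("rounded bytecount=%lu\n", bytecount);
	return (0);
}
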
1145 if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
1147 bp->b_data = unmapped_buf;
1148 bp->b_offset = 0;
1150 bp->b_data = bp->b_kvabase;
1151 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1155 bp->b_iocmd = BIO_READ;
1156 KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
1157 KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
1158 bp->b_rcred = crhold(curthread->td_ucred);
1159 bp->b_wcred = crhold(curthread->td_ucred);
1161 bp->b_vp = vp;
1162 bp->b_bcount = bp->b_bufsize = bytecount;
1163 bp->b_iooffset = dbtob(bp->b_blkno);
1164 KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) ==
1165 (blkno0 - bp->b_blkno) * DEV_BSIZE +
1166 IDX_TO_OFF(m[0]->pindex) % bsize,
1169 (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex,
1170 (uintmax_t)blkno0, (uintmax_t)bp->b_blkno));
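
The KASSERT at lines 1164-1170 checks that the byte distance between the first requested page and the first page placed in the buffer equals the number of sectors b_blkno was stepped back from the VOP_BMAP result, converted to bytes, plus the request's sub-block offset. A sketch of that identity with illustrative numbers (4 KB pages, 512-byte sectors, 32 KB blocks):

#include <stdio.h>

#define PAGE_SIZE	4096
#define DEV_BSIZE	512

int
main(void)
{
	long long bsize = 32768;	/* filesystem block size (illustrative) */
	long long req_pindex = 21;	/* first requested page, m[0]->pindex */
	long long buf_pindex = 16;	/* first page in bp->b_pages[] (read-behind) */
	long long blkno0 = 1000;	/* block address returned by VOP_BMAP */
	/* b_blkno after adding the sub-block offset and backing up for read-behind. */
	long long b_blkno = blkno0 + (req_pindex * PAGE_SIZE % bsize) / DEV_BSIZE -
	    (req_pindex - buf_pindex) * (PAGE_SIZE / DEV_BSIZE);

	long long lhs = (req_pindex - buf_pindex) * PAGE_SIZE;
	long long rhs = (blkno0 - b_blkno) * DEV_BSIZE +
	    req_pindex * PAGE_SIZE % bsize;

	printf("lhs=%lld rhs=%lld %s\n", lhs, rhs, lhs == rhs ? "ok" : "mismatch");
	return (0);
}
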
1172 (void)runningbufclaim(bp, bp->b_bufsize);
1175 VM_CNT_ADD(v_vnodepgsin, bp->b_npages);
1178 bp->b_pgiodone = iodone;
1179 bp->b_caller1 = arg;
1180 bp->b_iodone = vnode_pager_generic_getpages_done_async;
1181 bp->b_flags |= B_ASYNC;
1186 bp->b_iodone = bdone;
1190 for (i = 0; i < bp->b_npages; i++)
1191 bp->b_pages[i] = NULL;
1192 bp->b_vp = NULL;
1206 bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore,
1207 bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error);
1208 for (int i = 0; i < bp->b_npages; i++)
1209 bp->b_pages[i] = NULL;
1210 bp->b_vp = NULL;
1222 KASSERT((bp->b_ioflags & BIO_ERROR) == 0 || bp->b_error != 0,
1224 error = (bp->b_ioflags & BIO_ERROR) != 0 ? bp->b_error : 0;
1225 object = bp->b_vp->v_object;
1229 if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) {
1231 bp->b_data = bp->b_kvabase;
1232 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages,
1233 bp->b_npages);
1235 bzero(bp->b_data + bp->b_bcount,
1236 PAGE_SIZE * bp->b_npages - bp->b_bcount);
1239 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1240 bp->b_data = unmapped_buf;
1246 * or by the bp->b_pgiodone callback (for async requests).
1250 for (i = 0; i < bp->b_pgbefore; i++)
1251 vm_page_free_invalid(bp->b_pages[i]);
1252 for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++)
1253 vm_page_free_invalid(bp->b_pages[i]);
1260 for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex);
1261 i < bp->b_npages; i++, tfoff = nextoff) {
1265 mt = bp->b_pages[i];
1269 if (nextoff <= object->un_pager.vnp.vnp_size) {
1274 KASSERT(mt->dirty == 0,
1282 * Currently we do not set the entire page valid,
1287 object->un_pager.vnp.vnp_size - tfoff);
1288 KASSERT((mt->dirty & vm_page_bits(0,
1289 object->un_pager.vnp.vnp_size - tfoff)) == 0,
1293 if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter)
1319 * to prevent a low-memory deadlock. VOP operations often need to
1333 * Call device-specific putpages function
1335 vp = object->handle;
1354 KASSERT(IDX_TO_OFF(m->pindex) <= offset &&
1355 offset < IDX_TO_OFF(m->pindex + 1),
1356 ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex,
1358 return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0);
1367 * underlying filesystem to write the data out asynchronously rather
1385 object = vp->v_object;
1391 if ((int64_t)ma[0]->pindex < 0) {
1393 "attempt to write meta-data 0x%jx(%lx)\n",
1394 (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty);
1402 poffset = IDX_TO_OFF(ma[0]->pindex);
1405 * If the page-aligned write is larger then the actual file we
1407 * there is an edge case where a file may not be page-aligned where
1413 * We do not under any circumstances truncate the valid bits, as
1417 if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
1418 if (object->un_pager.vnp.vnp_size > poffset) {
1419 maxsize = object->un_pager.vnp.vnp_size - poffset;
1430 m = ma[ncount - 1];
1433 ("vnode_pager_generic_putpages: page %p is not read-only", m));
1434 MPASS(m->dirty != 0);
1435 vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
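
Lines 1402-1435 clamp a putpages run to the file size: the byte count is limited to vnp_size, the page count is recomputed, and dirty bits past EOF on the last page are cleared. A minimal sketch of that clamping, using illustrative values:

#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_MASK	(PAGE_SIZE - 1)

int
main(void)
{
	long long vnp_size = 10000;		/* file size in bytes (illustrative) */
	long long poffset = 8192;		/* byte offset of ma[0] (page 2) */
	long long maxsize = 3 * PAGE_SIZE;	/* requested run: 3 pages */
	long long ncount, pgoff;

	if (maxsize + poffset > vnp_size)
		maxsize = vnp_size > poffset ? vnp_size - poffset : 0;
	ncount = (maxsize + PAGE_MASK) / PAGE_SIZE;	/* pages actually written */
	pgoff = maxsize & PAGE_MASK;		/* valid bytes in the last page */

	/* Bytes [pgoff, PAGE_SIZE) of the last page lie past EOF. */
	printf("maxsize=%lld ncount=%lld pgoff=%lld\n", maxsize, ncount, pgoff);
	return (0);
}
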
1456 m = ma[OFF_TO_IDX(prev_offset - poffset)];
1472 m = ma[OFF_TO_IDX(next_offset - poffset)];
1495 prev_resid = auio.uio_resid = aiov.iov_len = next_offset -
1498 vnode_pager_putpages_ioflags(flags), curthread->td_ucred);
1500 wrsz = prev_resid - auio.uio_resid;
1504 "zero-length write at %ju resid %zd\n",
1523 (uintmax_t)ma[0]->pindex);
1529 for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++)
1536 rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR;
1550 * from saturating the buffer cache. Dummy-up the sequential
1572 * were actually written. eof is the offset past the last valid byte
1599 * The page contains the last valid byte in
1605 vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE -
1614 if (ma[i]->dirty == 0)
1634 if (object->type != OBJT_VNODE) {
1638 old_wm = object->un_pager.vnp.writemappings;
1639 object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
1640 vp = object->handle;
1641 if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
1645 __func__, vp, vp->v_writecount);
1646 } else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
1648 VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
1650 __func__, vp, vp->v_writecount);
1669 if (object->type != OBJT_VNODE) {
1678 inc = end - start;
1679 if (object->un_pager.vnp.writemappings != inc) {
1680 object->un_pager.vnp.writemappings -= inc;
1685 vp = object->handle;
1708 *vpp = object->handle;
1717 obj = vp->v_object;