/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>

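/*
 * Return the page at index "pindex" of the shmem-backed VM object "obj",
 * faulting it in and wiring it if it is not already resident and valid.
 * Only sleeping allocations are supported; GFP_NOWAIT callers panic.
 * On failure an ERR_PTR() value is returned, as in the Linux API this
 * function mirrors.
 */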
struct page *
linux_shmem_read_mapping_page_gfp(vm_object_t obj, int pindex, gfp_t gfp)
{
	struct page *page;
	int rv;

	if ((gfp & GFP_NOWAIT) != 0)
		panic("GFP_NOWAIT is unimplemented");

	VM_OBJECT_WLOCK(obj);
	rv = vm_page_grab_valid(&page, obj, pindex, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
	VM_OBJECT_WUNLOCK(obj);
	if (rv != VM_PAGER_OK)
		return (ERR_PTR(-EINVAL));
	return (page);
}

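/*
 * Create an anonymous, swap-backed "shmem" file of the requested size.
 * The struct linux_file and its placeholder vnode are carved out of a
 * single allocation, and the backing store is an OBJT_SWAP VM object
 * hung off filp->f_shmem.  Returns an ERR_PTR() value on failure.
 */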
struct linux_file *
linux_shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	struct fileobj {
		struct linux_file file __aligned(sizeof(void *));
		struct vnode vnode __aligned(sizeof(void *));
	};
	struct fileobj *fileobj;
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	fileobj = kzalloc(sizeof(*fileobj), GFP_KERNEL);
	if (fileobj == NULL) {
		error = -ENOMEM;
		goto err_0;
	}
	filp = &fileobj->file;
	vp = &fileobj->vnode;

	filp->f_count = 1;
	filp->f_vnode = vp;
	filp->f_shmem = vm_pager_allocate(OBJT_SWAP, NULL, size,
	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
	if (filp->f_shmem == NULL) {
		error = -ENOMEM;
		goto err_1;
	}
	return (filp);
err_1:
	kfree(filp);
err_0:
	return (ERR_PTR(error));
}

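/*
 * Remove the resident pages of "obj" in the index range [start, end) and
 * return how many were dropped, computed as the difference in the object's
 * resident page count taken under the object write lock.
 */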
static vm_ooffset_t
linux_invalidate_mapping_pages_sub(vm_object_t obj, vm_pindex_t start,
    vm_pindex_t end, int flags)
{
	int start_count, end_count;

	VM_OBJECT_WLOCK(obj);
	start_count = obj->resident_page_count;
	vm_object_page_remove(obj, start, end, flags);
	end_count = obj->resident_page_count;
	VM_OBJECT_WUNLOCK(obj);
	return (start_count - end_count);
}

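/*
 * Drop clean resident pages in the given page-index range; OBJPR_CLEANONLY
 * leaves dirty pages in place.  This is intended to mirror Linux's
 * invalidate_mapping_pages(), which only reclaims pages whose contents can
 * be recreated cheaply.
 */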
unsigned long
linux_invalidate_mapping_pages(vm_object_t obj, pgoff_t start, pgoff_t end)
{

	return (linux_invalidate_mapping_pages_sub(obj, start, end, OBJPR_CLEANONLY));
}

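/*
 * Discard the pages backing the byte range [lstart, lend] of the shmem
 * object.  lstart is rounded up and lend + 1 rounded down to page
 * boundaries, so only whole pages inside the range are removed; dirty
 * pages are discarded as well (flags == 0).
 */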
void
linux_shmem_truncate_range(vm_object_t obj, loff_t lstart, loff_t lend)
{
	vm_pindex_t start = OFF_TO_IDX(lstart + PAGE_SIZE - 1);
	vm_pindex_t end = OFF_TO_IDX(lend + 1);

	(void) linux_invalidate_mapping_pages_sub(obj, start, end, 0);
}