/*	$OpenBSD: ttm_tt.c,v 1.4 2015/02/11 07:01:37 jsg Exp $	*/
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/drm_cache.h>
#include <dev/pci/drm/drm_mem_util.h>
#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/ttm/ttm_page_alloc.h>

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
	ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
					    sizeof(*ttm->dma_address));
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct vm_page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct vm_page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct vm_page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
#ifdef notyet
		drm_clflush_pages(ttm->pages, ttm->num_pages);
#else
		printf("%s partial stub\n", __func__);
#endif

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
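
/*
 * A minimal, hypothetical caller of the mapping above, guarded out the
 * same way as the other unbuilt code in this file.  Because
 * ttm_tt_set_placement_caching() tests TTM_PL_FLAG_WC before
 * TTM_PL_FLAG_UNCACHED, a placement carrying both flags resolves to
 * write-combined, not uncached.
 */
#ifdef notyet
static int
ttm_tt_caching_example(struct ttm_tt *ttm)
{
	/* resolves to tt_wc: the WC check runs first */
	return ttm_tt_set_placement_caching(ttm,
	    TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED);
}
#endif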

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (ttm->state == tt_unbound) {
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		uao_detach(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct vm_page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags,
		    struct vm_page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages || !ttm_dma->dma_address) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);
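
/*
 * Both initializers above round the byte size up to whole pages:
 * num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT, so with 4 KiB
 * pages a size of 10000 bytes yields num_pages == 3.  The dma variant
 * additionally allocates a dma_address array parallel to pages; if
 * either allocation fails, init unwinds through ttm_tt_destroy() and
 * returns -ENOMEM.
 */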

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm_dma->dma_address);
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

/*
 * Copy a swapped-out ttm's contents back in from the UVM anonymous
 * object backing it.  ttm->pages[] must have been repopulated first.
 */
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	TAILQ_INIT(&plist);
	if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	from_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		uvm_pagecopy(from_page, to_page);
		from_page = TAILQ_NEXT(from_page, pageq);
	}

	uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		uao_detach(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

/*
 * Copy a ttm's pages out to a UVM anonymous object (a freshly created
 * one unless persistent_swap_storage is given) and unpopulate the ttm.
 */
int ttm_tt_swapout(struct ttm_tt *ttm, struct uvm_object *persistent_swap_storage)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = uao_create(ttm->num_pages << PAGE_SHIFT, 0);
#ifdef notyet
		if (unlikely(IS_ERR(swap_storage))) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
#endif
	} else
		swap_storage = persistent_swap_storage;

	TAILQ_INIT(&plist);
	if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	to_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		uvm_pagecopy(from_page, to_page);
#ifdef notyet
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
#endif
		to_page = TAILQ_NEXT(to_page, pageq);
	}

	uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		uao_detach(swap_storage);

	return ret;
}
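
/*
 * Swap lifecycle, as implemented above: ttm_tt_swapout() copies every
 * populated page into the anonymous object, has the driver unpopulate
 * the ttm and marks it TTM_PAGE_FLAG_SWAPPED.  The pages return via
 * ttm_tt_swapin(), which requires ttm->pages[] to be populated again;
 * in the stock TTM pool allocator it is the populate hook that notices
 * TTM_PAGE_FLAG_SWAPPED after reallocating pages and calls
 * ttm_tt_swapin().  Unless the object is persistent
 * (TTM_PAGE_FLAG_PERSISTENT_SWAP), swapin drops the uao reference.
 */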