Lines Matching defs:seg
323 struct vm_phys_seg *seg;
332 seg = &vm_phys_segs[segind];
334 (uintmax_t)seg->start);
336 (uintmax_t)seg->end);
337 sbuf_printf(&sbuf, "domain: %d\n", seg->domain);
338 sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
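The matches at lines 323-338 come from the sysctl handler that walks the segment array and prints each entry's bounds, domain, and free-queue pointer into an sbuf. The sketch below is a minimal userland model of that traversal: struct seg, segs[], nsegs, and printf() in place of sbuf_printf() are all stand-ins; only the field names and output labels mirror the listing.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t vm_paddr_t;          /* stand-in for the kernel typedef */

    struct seg {                          /* trimmed stand-in for struct vm_phys_seg */
        vm_paddr_t start;
        vm_paddr_t end;
        int        domain;
        void      *free_queues;
    };

    static struct seg segs[4];
    static int nsegs;

    /* Walk every registered segment and dump it, one field per line. */
    static void
    show_segs(void)
    {
        for (int segind = 0; segind < nsegs; segind++) {
            struct seg *seg = &segs[segind];
            printf("start:     %#jx\n", (uintmax_t)seg->start);
            printf("end:       %#jx\n", (uintmax_t)seg->end);
            printf("domain:    %d\n", seg->domain);
            printf("free list: %p\n\n", seg->free_queues);
        }
    }

    int
    main(void)
    {
        segs[0] = (struct seg){ 0x100000, 0x7fe00000, 0, NULL };
        nsegs = 1;
        show_segs();
        return (0);
    }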
422 struct vm_phys_seg *seg;
428 seg = &vm_phys_segs[vm_phys_nsegs++];
429 while (seg > vm_phys_segs && (seg - 1)->start >= end) {
430 *seg = *(seg - 1);
431 seg--;
433 seg->start = start;
434 seg->end = end;
435 seg->domain = domain;
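Lines 422-435 keep the segment array sorted by start address: the new entry is appended at the end and then shifted toward the front while its predecessor starts at or above the new segment's end. A userland re-creation of that insertion step, with stand-in types and a fixed-size array, assuming the caller never overflows it:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t vm_paddr_t;

    struct seg {                 /* stand-in for struct vm_phys_seg */
        vm_paddr_t start, end;
        int domain;
    };

    #define MAXSEGS 8
    static struct seg segs[MAXSEGS];
    static int nsegs;

    /* Insert a segment, keeping segs[] sorted by start (one insertion-sort step). */
    static void
    create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
    {
        struct seg *seg = &segs[nsegs++];

        /* Shift existing entries up while they begin at or beyond the new end. */
        while (seg > segs && (seg - 1)->start >= end) {
            *seg = *(seg - 1);
            seg--;
        }
        seg->start = start;
        seg->end = end;
        seg->domain = domain;
    }

    int
    main(void)
    {
        create_seg(0x200000, 0x300000, 0);
        create_seg(0x000000, 0x100000, 0);   /* sorts ahead of the first entry */
        for (int i = 0; i < nsegs; i++)
            printf("[%d] %#jx-%#jx\n", i, (uintmax_t)segs[i].start,
                (uintmax_t)segs[i].end);
        return (0);
    }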
513 struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
530 seg = &vm_phys_segs[segind];
532 if (seg->end <= VM_LOWMEM_BOUNDARY)
546 seg->end <= VM_DMA32_BOUNDARY)
552 npages += atop(seg->end - seg->start);
576 seg = &vm_phys_segs[segind];
578 seg->first_page = &vm_page_array[npages];
579 npages += atop(seg->end - seg->start);
581 seg->first_page = PHYS_TO_VM_PAGE(seg->start);
584 if (seg->end <= VM_LOWMEM_BOUNDARY) {
591 if (seg->end <= VM_DMA32_BOUNDARY) {
602 seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
610 seg = &vm_phys_segs[1];
612 while (seg < end_seg) {
613 if (prev_seg->end == seg->start &&
614 prev_seg->free_queues == seg->free_queues) {
615 prev_seg->end = seg->end;
616 KASSERT(prev_seg->domain == seg->domain,
620 for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
623 prev_seg = seg;
624 seg++;
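Lines 576-602 give each segment its first_page pointer and hook it to a free-queue block chosen from the VM_LOWMEM_BOUNDARY / VM_DMA32_BOUNDARY checks, and lines 610-624 then coalesce neighbours: when a segment starts exactly where its predecessor ends and both share the same free_queues pointer, the predecessor absorbs it and the rest of the array is copied down. A simplified sketch of that coalescing pass over a stand-in array (types and the helper name are illustrative; the kernel also asserts the domains match, per line 616):

    #include <stdint.h>

    typedef uint64_t vm_paddr_t;

    struct seg {
        vm_paddr_t start, end;
        int domain;
        void *free_queues;       /* segments sharing this pointer may be merged */
    };

    /* Merge adjacent, queue-compatible segments in place; returns the new count. */
    static int
    coalesce_segs(struct seg *segs, int nsegs)
    {
        if (nsegs < 2)
            return (nsegs);

        struct seg *prev_seg = &segs[0];
        struct seg *seg = &segs[1];
        struct seg *end_seg = &segs[nsegs];

        while (seg < end_seg) {
            if (prev_seg->end == seg->start &&
                prev_seg->free_queues == seg->free_queues) {
                /* Extend the previous segment and close the gap in the array. */
                prev_seg->end = seg->end;
                for (struct seg *tmp = seg; tmp + 1 < end_seg; tmp++)
                    *tmp = *(tmp + 1);
                end_seg--;
                nsegs--;
            } else {
                prev_seg = seg;
                seg++;
            }
        }
        return (nsegs);
    }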
999 vm_phys_seg_paddr_to_vm_page(struct vm_phys_seg *seg, vm_paddr_t pa)
1001 KASSERT(pa >= seg->start && pa < seg->end,
1004 return (&seg->first_page[atop(pa - seg->start)]);
1013 struct vm_phys_seg *seg;
1015 if ((seg = vm_phys_paddr_to_seg(pa)) != NULL)
1016 return (vm_phys_seg_paddr_to_vm_page(seg, pa));
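Lines 999-1016 are the physical-address-to-page translation: assert that pa lies inside the segment, then index first_page by the page offset, atop(pa - seg->start). A compact sketch with stand-in types; atop() is modelled as a shift by an assumed 4 KiB page size, and assert() stands in for KASSERT():

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t vm_paddr_t;

    #define PAGE_SHIFT 12                      /* assumed 4 KiB base page */
    #define atop(pa)   ((pa) >> PAGE_SHIFT)    /* address-to-page-index */

    struct vm_page { int dummy; };             /* placeholder for the real struct */

    struct seg {
        vm_paddr_t start, end;
        struct vm_page *first_page;            /* metadata for the segment's first page */
    };

    /* Translate a physical address inside seg to its vm_page (mirrors line 1004). */
    static struct vm_page *
    seg_paddr_to_vm_page(struct seg *seg, vm_paddr_t pa)
    {
        assert(pa >= seg->start && pa < seg->end);
        return (&seg->first_page[atop(pa - seg->start)]);
    }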
1023 struct vm_phys_fictitious_seg tmp, *seg;
1031 seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
1033 if (seg == NULL)
1036 m = &seg->first_page[atop(pa - seg->start)];
1060 struct vm_phys_fictitious_seg *seg;
1129 seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
1130 seg->start = start;
1131 seg->end = end;
1132 seg->first_page = fp;
1135 RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
1144 struct vm_phys_fictitious_seg *seg, tmp;
1190 seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
1191 if (seg->start != start || seg->end != end) {
1197 RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
1199 free(seg->first_page, M_FICT_PAGES);
1200 free(seg, M_FICT_PAGES);
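The fictitious-segment matches (lines 1023-1200) describe lookup, registration, and removal of ranges that are not backed by vm_phys_segs[]: lookup builds a key and uses RB_FIND on a tree of ranges, registration mallocs a segment around a caller-supplied page array and RB_INSERTs it, and removal RB_REMOVEs and frees it. The sketch below keeps the same shape but replaces the RB tree with a small array and linear search so it stays header-free; every name here is a stand-in:

    #include <stdint.h>
    #include <stdlib.h>

    typedef uint64_t vm_paddr_t;

    #define PAGE_SHIFT 12
    #define atop(pa)   ((pa) >> PAGE_SHIFT)

    struct vm_page { int dummy; };

    /* Stand-in for struct vm_phys_fictitious_seg; the kernel keeps these in an RB tree. */
    struct fict_seg {
        vm_paddr_t start, end;
        struct vm_page *first_page;
    };

    #define MAXFICT 8
    static struct fict_seg *fict_segs[MAXFICT];
    static int nfict;

    /* Linear search standing in for RB_FIND() at line 1031. */
    static struct fict_seg *
    fict_lookup(vm_paddr_t pa)
    {
        for (int i = 0; i < nfict; i++)
            if (pa >= fict_segs[i]->start && pa < fict_segs[i]->end)
                return (fict_segs[i]);
        return (NULL);
    }

    /* Register a range backed by a caller-supplied page array (lines 1129-1135). */
    static int
    fict_register(vm_paddr_t start, vm_paddr_t end, struct vm_page *fp)
    {
        if (nfict == MAXFICT)
            return (-1);
        struct fict_seg *seg = calloc(1, sizeof(*seg));
        if (seg == NULL)
            return (-1);
        seg->start = start;
        seg->end = end;
        seg->first_page = fp;
        fict_segs[nfict++] = seg;                    /* RB_INSERT stand-in */
        return (0);
    }

    /* Translate pa to its fictitious vm_page, as on line 1036. */
    static struct vm_page *
    fict_to_vm_page(vm_paddr_t pa)
    {
        struct fict_seg *seg = fict_lookup(pa);
        return (seg == NULL ? NULL : &seg->first_page[atop(pa - seg->start)]);
    }

    /* Unregister an exact range and free its entry (lines 1190-1200). */
    static void
    fict_unregister(vm_paddr_t start, vm_paddr_t end)
    {
        for (int i = 0; i < nfict; i++) {
            struct fict_seg *seg = fict_segs[i];
            if (seg->start != start || seg->end != end)
                continue;
            fict_segs[i] = fict_segs[--nfict];       /* RB_REMOVE stand-in */
            free(seg);                               /* the kernel frees first_page here too */
            return;
        }
    }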
1213 struct vm_phys_seg *seg;
1224 seg = &vm_phys_segs[m->segind];
1225 vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1230 if (pa < seg->start || pa >= seg->end)
1232 m_buddy = vm_phys_seg_paddr_to_vm_page(seg, pa);
1235 fl = (*seg->free_queues)[m_buddy->pool];
1240 m = vm_phys_seg_paddr_to_vm_page(seg, pa);
1243 fl = (*seg->free_queues)[pool];
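Lines 1213-1243 are the buddy-coalescing free path: the page's segment comes from m->segind, the buddy block's physical address is derived from the page's own address, and if that buddy lies inside the segment and is free at the same order the two merge and the loop retries one order higher, before the final block is queued on (*seg->free_queues)[pool]. The listing does not show the address arithmetic itself, so the sketch below is an assumed model of the standard buddy scheme (XOR to locate the buddy, AND to locate the merged block), with a 4 KiB base page:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t vm_paddr_t;

    #define PAGE_SHIFT 12   /* assumed 4 KiB base page */

    /* Physical address of the buddy of the 2^order-page block starting at pa. */
    static vm_paddr_t
    buddy_of(vm_paddr_t pa, int order)
    {
        return (pa ^ ((vm_paddr_t)1 << (PAGE_SHIFT + order)));
    }

    /* Start of the order+1 block formed when pa and its buddy merge. */
    static vm_paddr_t
    merged_block(vm_paddr_t pa, int order)
    {
        return (pa & ~((vm_paddr_t)1 << (PAGE_SHIFT + order)));
    }

    int
    main(void)
    {
        vm_paddr_t pa = 0x5000;                       /* page 5 of a small segment */
        for (int order = 0; order < 3; order++) {
            printf("order %d: block %#jx buddy %#jx merged %#jx\n", order,
                (uintmax_t)pa, (uintmax_t)buddy_of(pa, order),
                (uintmax_t)merged_block(pa, order));
            pa = merged_block(pa, order);             /* pretend the merge succeeded */
        }
        return (0);
    }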
1359 struct vm_phys_seg *seg;
1369 seg = &vm_phys_segs[m->segind];
1370 fl = (*seg->free_queues)[pool];
1384 KASSERT(seg == &vm_phys_segs[m->segind],
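The recurring fl = (*seg->free_queues)[pool] pattern (lines 1235, 1243, 1370, 1526) and the assignment on line 602 suggest that free_queues points into a per-domain table of free-list blocks, each block a two-dimensional array indexed by pool and then order. The model below reproduces that indexing; the dimension names and sizes are invented for illustration and do not claim to match the kernel's constants:

    #define MAXMEMDOM     2     /* hypothetical domain count */
    #define VM_NFREELIST  3     /* hypothetical number of free-list indices (flind) */
    #define VM_NFREEPOOL  3     /* hypothetical pool count */
    #define VM_NFREEORDER 13    /* hypothetical number of buddy orders */

    struct vm_freelist { void *head; int cnt; };    /* stand-in list head */

    /* One queue block: rows are pools, columns are orders. */
    typedef struct vm_freelist vm_freequeue_t[VM_NFREEPOOL][VM_NFREEORDER];

    /* Mirrors the shape implied by line 602: table[domain][flind] is one block. */
    static vm_freequeue_t free_queues_table[MAXMEMDOM][VM_NFREELIST];

    struct seg {
        vm_freequeue_t *free_queues;
    };

    /* Point a segment at the queue block for its domain and free-list index. */
    static void
    seg_set_queues(struct seg *seg, int domain, int flind)
    {
        seg->free_queues = &free_queues_table[domain][flind];
    }

    /* Same shape as fl = (*seg->free_queues)[pool]; fl[order] is one list head. */
    static struct vm_freelist *
    seg_freelist(struct seg *seg, int pool)
    {
        return ((*seg->free_queues)[pool]);
    }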
1443 struct vm_phys_seg *end_seg, *seg;
1448 for (seg = &vm_phys_segs[segind]; seg < end_seg; seg++) {
1449 if (seg->domain != domain)
1451 if (seg->start >= high)
1453 pa_start = MAX(low, seg->start);
1454 pa_end = MIN(high, seg->end);
1463 bounds[0] = vm_phys_seg_paddr_to_vm_page(seg, pa_start);
1464 bounds[1] = &seg->first_page[atop(pa_end - seg->start)];
1465 return (seg - vm_phys_segs);
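Lines 1443-1465 scan vm_phys_segs[] starting at a caller-provided index, skip segments in other domains, stop once segments begin past the requested window (the array is sorted by start), clamp [low, high) to the segment, and hand back vm_page bounds plus the segment index. The sketch below mirrors that flow over a stand-in array; it checks only that the clamped window is non-empty, whereas the listing's caller also verifies it can hold the requested number of pages:

    #include <stdint.h>

    typedef uint64_t vm_paddr_t;

    #define PAGE_SHIFT 12
    #define atop(pa)   ((pa) >> PAGE_SHIFT)
    #define MAX(a, b)  ((a) > (b) ? (a) : (b))
    #define MIN(a, b)  ((a) < (b) ? (a) : (b))

    struct vm_page { int dummy; };

    struct seg {
        vm_paddr_t start, end;
        int domain;
        struct vm_page *first_page;
    };

    /*
     * Find the first segment at or after segind that intersects [low, high) in
     * the given domain; fill in clamped page bounds and return its index, or -1.
     */
    static int
    find_range(struct seg *segs, int nsegs, int segind, int domain,
        vm_paddr_t low, vm_paddr_t high, struct vm_page *bounds[2])
    {
        for (struct seg *seg = &segs[segind]; seg < &segs[nsegs]; seg++) {
            if (seg->domain != domain)
                continue;
            if (seg->start >= high)
                break;                     /* segs[] is sorted by start address */
            vm_paddr_t pa_start = MAX(low, seg->start);
            vm_paddr_t pa_end = MIN(high, seg->end);
            if (pa_start >= pa_end)
                continue;
            bounds[0] = &seg->first_page[atop(pa_start - seg->start)];
            bounds[1] = &seg->first_page[atop(pa_end - seg->start)];
            return ((int)(seg - segs));
        }
        return (-1);
    }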
1481 struct vm_phys_seg *seg;
1486 seg = vm_phys_paddr_to_seg(pa);
1487 vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1493 vm_phys_lazy_init_domain(seg->domain, true);
1506 if (pa >= seg->start)
1507 m_set = vm_phys_seg_paddr_to_vm_page(seg, pa);
1526 fl = (*seg->free_queues)[pool];
1533 m_tmp = vm_phys_seg_paddr_to_vm_page(seg, pa_half);
1536 m_set = vm_phys_seg_paddr_to_vm_page(seg, pa_half);
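Lines 1481-1536 belong to the path that carves a single page out of a larger free block: starting from the free block containing the target address, the block is split in half (pa_half marks the split point), the half that does not contain the target goes back on a free list one order down, and the descent continues into the other half until only the target page is left. The sketch below reproduces that halving descent; the free-list bookkeeping and the exact split-point formula are not visible in the listing, so the XOR used here is an assumption, as is the 4 KiB page size:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t vm_paddr_t;

    #define PAGE_SHIFT 12   /* assumed 4 KiB base page */

    /*
     * Isolate the page at pa from a free block of 2^order pages starting at
     * block_pa, printing which half-blocks would be returned to the free lists.
     */
    static void
    split_for(vm_paddr_t block_pa, int order, vm_paddr_t pa)
    {
        while (order > 0) {
            order--;
            vm_paddr_t pa_half = block_pa ^ ((vm_paddr_t)1 << (PAGE_SHIFT + order));
            if (pa >= pa_half) {
                /* Target is in the upper half: free the lower half, descend upper. */
                printf("free order %d block at %#jx\n", order, (uintmax_t)block_pa);
                block_pa = pa_half;
            } else {
                /* Target is in the lower half: free the upper half, descend lower. */
                printf("free order %d block at %#jx\n", order, (uintmax_t)pa_half);
            }
        }
        printf("isolated page at %#jx\n", (uintmax_t)block_pa);
    }

    int
    main(void)
    {
        split_for(0x0, 3, 0x5000);   /* isolate page 5 out of an 8-page block */
        return (0);
    }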
1553 struct vm_phys_seg *seg;
1575 seg = &vm_phys_segs[m->segind];
1576 if (VM_PAGE_TO_PHYS(m) < MAX(low, seg->start))
1579 VM_PAGE_TO_PHYS(m) - max_size >= MAX(low, seg->start) &&
1591 VM_PAGE_TO_PHYS(m_ret) + size <= MIN(high, seg->end) &&
1600 if (VM_PAGE_TO_PHYS(m_ret) + size > MIN(high, seg->end))
1689 struct vm_phys_seg *seg;
1702 seg = &vm_phys_segs[segind];
1703 if (seg->start >= high || seg->domain != domain)
1705 if (low >= seg->end)
1707 if (low <= seg->start)
1708 pa_start = seg->start;
1711 if (high < seg->end)
1714 pa_end = seg->end;
1723 if (seg->free_queues == queues)
1725 queues = seg->free_queues;
1752 seg = &vm_phys_segs[m_run->segind];
1753 KASSERT(seg->domain == domain,
1886 struct vm_phys_seg *seg;
1893 seg = &vm_phys_early_segs[vm_phys_early_nsegs++];
1894 seg->start = start;
1895 seg->end = end;
1991 struct vm_phys_seg *seg;
2000 seg = &vm_phys_early_segs[i];
2001 vm_phys_add_seg(seg->start, seg->end);
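Lines 1886-1895 and 1991-2001 close the loop on bootstrap: ranges reported before the allocator is initialized are stashed in a small early-segment array, and startup later replays each stashed range through vm_phys_add_seg(). A bare-bones sketch of that staging pattern, with illustrative names and a print standing in for the real registration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t vm_paddr_t;

    struct early_seg { vm_paddr_t start, end; };

    #define NEARLY 8
    static struct early_seg early_segs[NEARLY];
    static int early_nsegs;

    /* Stash a range before the physical allocator exists (lines 1893-1895). */
    static void
    early_add_seg(vm_paddr_t start, vm_paddr_t end)
    {
        struct early_seg *seg = &early_segs[early_nsegs++];
        seg->start = start;
        seg->end = end;
    }

    /* Stand-in for vm_phys_add_seg(); here it only reports the range. */
    static void
    add_seg(vm_paddr_t start, vm_paddr_t end)
    {
        printf("add seg %#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);
    }

    /* Replay every stashed range once the allocator is up (lines 2000-2001). */
    static void
    early_startup(void)
    {
        for (int i = 0; i < early_nsegs; i++) {
            struct early_seg *seg = &early_segs[i];
            add_seg(seg->start, seg->end);
        }
    }

    int
    main(void)
    {
        early_add_seg(0x1000, 0x9f000);
        early_add_seg(0x100000, 0x7fe00000);
        early_startup();
        return (0);
    }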