/* $NetBSD: riscv_tlb.c,v 1.2 2023/09/03 08:48:20 skrll Exp $ */

/*
 * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
 * Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_riscv_debug.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__RCSID("$NetBSD: riscv_tlb.c,v 1.2 2023/09/03 08:48:20 skrll Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <riscv/sbi.h>

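/*
 * Return the ASID currently active on this hart (the ASID field of
 * the satp CSR).
 */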
tlb_asid_t
tlb_get_asid(void)
{
	return csr_asid_read();
}

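/*
 * Make the given ASID current on this hart; the pmap argument is not
 * used here.
 */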
void
tlb_set_asid(tlb_asid_t asid, struct pmap *pm)
{
	csr_asid_write(asid);
}

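/*
 * Flush the entire TLB on the local hart: SFENCE.VMA with neither an
 * address nor an ASID operand invalidates all translations.
 */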
void
tlb_invalidate_all(void)
{
	asm volatile("sfence.vma"
	    : /* output operands */
	    : /* input operands */
	    : "memory");
}

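/*
 * RISC-V has no way to flush only global mappings, so flush
 * everything.
 */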
void
tlb_invalidate_globals(void)
{
	tlb_invalidate_all();
}

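/*
 * Invalidate all translations tagged with an ASID in [lo, hi] on the
 * local hart.  Under MULTIPROCESSOR with a single shared ASID space
 * (PMAP_TLB_MAX == 1), use the SBI remote fence to issue the
 * equivalent SFENCE.VMA on every other hart.
 */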
void
tlb_invalidate_asids(tlb_asid_t lo, tlb_asid_t hi)
{
	tlb_asid_t asid;
	for (asid = lo; asid <= hi; asid++) {
		asm volatile("sfence.vma zero, %[asid]"
		    : /* output operands */
		    : [asid] "r" (asid)
		    : "memory");
	}
#ifdef MULTIPROCESSOR
#if PMAP_TLB_MAX == 1
	const cpuid_t myhartid = curcpu()->ci_cpuid;
	unsigned long hartmask = 0;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	for (CPU_INFO_FOREACH(cii, ci)) {
		const cpuid_t hartid = ci->ci_cpuid;

		if (hartid == myhartid)
			continue;

		KASSERT(hartid < sizeof(unsigned long) * NBBY);
		hartmask |= __BIT(hartid);
	}
	for (asid = lo; asid <= hi; asid++) {
		struct sbiret sbiret = sbi_remote_sfence_vma_asid(hartmask,
		    0 /* hartmask_base */, 0, ~0UL, asid);

		KASSERTMSG(sbiret.error == SBI_SUCCESS, "error %ld",
		    sbiret.error);
	}
#endif
#endif
}

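/*
 * Invalidate the translation for a single page.  Kernel mappings
 * (KERNEL_PID) are flushed for every address space since they are
 * global; user mappings are flushed only for the given ASID.  Under
 * MULTIPROCESSOR the flush is propagated to the other harts via the
 * SBI remote fence.
 */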
void
tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
{
	if (asid == KERNEL_PID) {
		asm volatile("sfence.vma %[va]"
		    : /* output operands */
		    : [va] "r" (va)
		    : "memory");
	} else {
		asm volatile("sfence.vma %[va], %[asid]"
		    : /* output operands */
		    : [va] "r" (va), [asid] "r" (asid)
		    : "memory");
	}
#ifdef MULTIPROCESSOR
#if PMAP_TLB_MAX == 1
	const cpuid_t myhartid = curcpu()->ci_cpuid;
	unsigned long hartmask = 0;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	for (CPU_INFO_FOREACH(cii, ci)) {
		const cpuid_t hartid = ci->ci_cpuid;
		if (hartid == myhartid)
			continue;

		KASSERT(hartid < sizeof(unsigned long) * NBBY);
		hartmask |= __BIT(hartid);
	}
	struct sbiret sbiret = sbi_remote_sfence_vma(hartmask,
	    0 /* hartmask_base */, va, PAGE_SIZE);

	KASSERTMSG(sbiret.error == SBI_SUCCESS, "error %ld",
	    sbiret.error);
#endif
#endif
}

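/*
 * The TLB cannot be written directly, so invalidate the stale entry
 * and let the hardware page-table walker pick up the new PTE on the
 * next access.
 */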
bool
tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert_p)
{
	KASSERT((va & PAGE_MASK) == 0);

	tlb_invalidate_addr(va, asid);

	return true;
}

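/*
 * Record which ASIDs are in use.  The TLB cannot be inspected, so
 * conservatively mark every ASID except the kernel's as live and
 * report the whole ASID space as active.
 */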
u_int
tlb_record_asids(u_long *ptr, tlb_asid_t asid_max)
{
	memset(ptr, 0xff, PMAP_TLB_NUM_PIDS / NBBY);
	ptr[0] = -2UL;

	return PMAP_TLB_NUM_PIDS - 1;
}

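/*
 * Walk the TLB calling func for each entry; the TLB contents cannot
 * be read on RISC-V, so there is nothing to do.
 */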
void
tlb_walk(void *ctx, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
{
	/* no way to view the TLB */
}