/*	$NetBSD: mmu_sh4.c,v 1.21 2020/08/04 01:55:16 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by UCHIYAMA Yasushi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mmu_sh4.c,v 1.21 2020/08/04 01:55:16 uwe Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <sh3/pte.h>	/* NetBSD/sh3 specific PTE */
#include <sh3/mmu.h>
#include <sh3/mmu_sh4.h>

static __noinline void do_tlb_assoc(uint32_t);
static __noinline void do_invalidate_asid(int);
static __noinline void do_invalidate_all(uint32_t);

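/*
 * Hazard barrier: writes to MMU control registers do not take effect
 * immediately, so pad with nops before translated accesses resume.
 */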
#define	SH4_MMU_HAZARD	__asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;")


void
sh4_mmu_start(void)
{
	uint32_t cr;

	/* Zero-clear all TLB entries */
	_reg_write_4(SH4_MMUCR, 0);	/* zero wired entries (URB = 0) */
	sh4_tlb_invalidate_all();

	/* Set current ASID to 0 */
	sh_tlb_set_asid(0);

	cr = SH4_MMUCR_AT;	/* address translation enabled */
	cr |= SH4_MMUCR_TI;	/* TLB invalidate */
	cr |= SH4_MMUCR_SQMD;	/* store queues not accessible to user */

	/* reserve TLB entries for wired u-area (cf. sh4_switch_resume) */
	cr |= (SH4_UTLB_ENTRY - UPAGES) << SH4_MMUCR_URB_SHIFT;

	_reg_write_4(SH4_MMUCR, cr);
	SH4_MMU_HAZARD;
}


/*
 * Perform associative write to UTLB.  Must be called via its P2
 * address.  Note, the ASID match is against PTEH, not "va".  The
 * caller is responsible for saving/setting/restoring PTEH.
 */
static __noinline void
do_tlb_assoc(uint32_t va)
{

	_reg_write_4(SH4_UTLB_AA | SH4_UTLB_A, va);
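	/*
	 * Padding so that the TLB write has completed by the time the
	 * caller resumes execution in the P1 segment.
	 */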
	PAD_P1_SWITCH;
}


void
sh4_tlb_invalidate_addr(int asid, vaddr_t va)
{
	void (*tlb_assoc_p2)(uint32_t);
	uint32_t opteh;
	uint32_t sr;

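	/*
	 * Use the P2 (uncached, untranslated) address of do_tlb_assoc
	 * so that instruction fetches do not go through the TLB while
	 * it is being modified.
	 */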
	tlb_assoc_p2 = SH3_P2SEG_FUNC(do_tlb_assoc);

	va &= SH4_UTLB_AA_VPN_MASK;

	sr = _cpu_exception_suspend();
	opteh = _reg_read_4(SH4_PTEH);	/* save current ASID */

	_reg_write_4(SH4_PTEH, asid);	/* set ASID for associative write */
	(*tlb_assoc_p2)(va);	/* invalidate { va, ASID } entry if it exists */

	_reg_write_4(SH4_PTEH, opteh);	/* restore ASID */
	_cpu_set_sr(sr);
}


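/*
 * Walk the UTLB and ITLB address arrays and clear every entry whose
 * ASID field matches.  Must be called via its P2 address.
 */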
static __noinline void
do_invalidate_asid(int asid)
{
	int e;

	for (e = 0; e < SH4_UTLB_ENTRY; ++e) {
		uint32_t addr = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT);
		uint32_t aa = _reg_read_4(addr);
		if ((aa & SH4_UTLB_AA_ASID_MASK) == asid)
			_reg_write_4(addr, 0);
	}

	for (e = 0; e < SH4_ITLB_ENTRY; ++e) {
		uint32_t addr = SH4_ITLB_AA | (e << SH4_ITLB_E_SHIFT);
		uint32_t aa = _reg_read_4(addr);
		if ((aa & SH4_ITLB_AA_ASID_MASK) == asid)
			_reg_write_4(addr, 0);
	}

	PAD_P1_SWITCH;
}


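/*
 * Invalidate all TLB entries tagged with the given ASID.  ASID 0 is
 * the kernel's and is never flushed wholesale, hence the assertion.
 */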
void
sh4_tlb_invalidate_asid(int asid)
{
	void (*invalidate_asid_p2)(int);
	uint32_t sr;

	KDASSERT(asid != 0);

	invalidate_asid_p2 = SH3_P2SEG_FUNC(do_invalidate_asid);

	sr = _cpu_exception_suspend();
	(*invalidate_asid_p2)(asid);

	_cpu_set_sr(sr);
}


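/*
 * Clear the first "limit" UTLB entries and all ITLB entries.  UTLB
 * entries at or above "limit", if any, are the wired u-area entries
 * and are preserved.  Must be called via its P2 address.
 */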
static __noinline void
do_invalidate_all(uint32_t limit)
{
	int e;

	for (e = 0; e < limit; ++e) {
		_reg_write_4(SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT), 0);
		_reg_write_4(SH4_UTLB_DA1 | (e << SH4_UTLB_E_SHIFT), 0);
	}

	for (e = 0; e < SH4_ITLB_ENTRY; ++e) {
		_reg_write_4(SH4_ITLB_AA | (e << SH4_ITLB_E_SHIFT), 0);
		_reg_write_4(SH4_ITLB_DA1 | (e << SH4_ITLB_E_SHIFT), 0);
	}

	PAD_P1_SWITCH;
}


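/*
 * Invalidate all TLB entries, sparing the UTLB entries wired for the
 * u-area when MMUCR.URB is set (cf. sh4_mmu_start).
 */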
void
sh4_tlb_invalidate_all(void)
{
	void (*invalidate_all_p2)(uint32_t);
	uint32_t limit;
	uint32_t sr;

	invalidate_all_p2 = SH3_P2SEG_FUNC(do_invalidate_all);

	/* do we reserve TLB entries for wired u-area? */
	limit = _reg_read_4(SH4_MMUCR) & SH4_MMUCR_URB_MASK;
	limit = limit ? (limit >> SH4_MMUCR_URB_SHIFT) : SH4_UTLB_ENTRY;

	sr = _cpu_exception_suspend();
	(*invalidate_all_p2)(limit);

	_cpu_set_sr(sr);
}


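/*
 * Load a new { va, asid } -> pte translation into the UTLB: any stale
 * entry is first flushed with an associative write, then the new
 * translation is loaded with ldtlb.
 */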
void
sh4_tlb_update(int asid, vaddr_t va, uint32_t pte)
{
	void (*tlb_assoc_p2)(uint32_t);
	uint32_t opteh, ptel;
	uint32_t sr;

	KDASSERT(asid < 0x100);
	KDASSERT(va != 0);
	KDASSERT((pte & ~PGOFSET) != 0);

	tlb_assoc_p2 = SH3_P2SEG_FUNC(do_tlb_assoc);

	sr = _cpu_exception_suspend();
	opteh = _reg_read_4(SH4_PTEH);	/* save old ASID */

	/*
	 * Invalidate the { va, ASID } entry if it exists.  Only the
	 * ASID is matched in PTEH, but set the va part too for ldtlb
	 * below.
	 */
	_reg_write_4(SH4_PTEH, (va & ~PGOFSET) | asid);
	(*tlb_assoc_p2)(va & SH4_UTLB_AA_VPN_MASK);

	/* Load new entry (PTEH is already set) */
	ptel = pte & PG_HW_BITS;
	_reg_write_4(SH4_PTEL, ptel);
	if (pte & _PG_PCMCIA) {
		_reg_write_4(SH4_PTEA,
		    (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK);
	} else {
		_reg_write_4(SH4_PTEA, 0);
	}
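	/*
	 * ldtlb copies PTEH/PTEL/PTEA into the UTLB entry selected by
	 * MMUCR.URC; the trailing nop covers the instruction hazard.
	 */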
	__asm volatile("ldtlb; nop");

	_reg_write_4(SH4_PTEH, opteh);	/* restore old ASID */
	_cpu_set_sr(sr);
}