xref: /openbsd-src/sys/arch/sh/sh/mmu_sh3.c (revision 0f1d16269626f6e5c6ff94d4d29a9f92cb277dbf)
1 /*	$OpenBSD: mmu_sh3.c,v 1.3 2016/03/05 17:16:33 tobiasu Exp $	*/
2 /*	$NetBSD: mmu_sh3.c,v 1.11 2006/03/04 01:13:35 uwe Exp $	*/
3 
4 /*-
5  * Copyright (c) 2002 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by UCHIYAMA Yasushi.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 
36 #include <sh/pte.h>	/* OpenBSD/sh specific PTE */
37 #include <sh/mmu.h>
38 #include <sh/mmu_sh3.h>
39 
/*
 * sh3_mmu_start:
 *	Bring up the SH3 MMU: flush every TLB entry, reset the current
 *	ASID to 0, then enable address translation.  The flush must come
 *	first so no stale VPN can match once translation is on.
 */
void
sh3_mmu_start(void)
{
	/* Zero clear all TLB entry */
	sh3_tlb_invalidate_all();

	/* Set current ASID to 0 */
	sh_tlb_set_asid(0);

	/* AT enables translation; TF requests a TLB flush (MMUCR bits). */
	_reg_write_4(SH3_MMUCR, SH3_MMUCR_AT | SH3_MMUCR_TF);
}
51 
52 void
sh3_tlb_invalidate_addr(int asid,vaddr_t va)53 sh3_tlb_invalidate_addr(int asid, vaddr_t va)
54 {
55 	uint32_t a, d;
56 	int w;
57 
58 	d = (va & SH3_MMUAA_D_VPN_MASK_4K) | asid;  /* 4K page */
59 	va = va & SH3_MMU_VPN_MASK;   /* [16:12] entry index */
60 
61 	/* Probe entry and invalidate it. */
62 	for (w = 0; w < SH3_MMU_WAY; w++) {
63 		a = va | (w << SH3_MMU_WAY_SHIFT); /* way [9:8] */
64 		if ((_reg_read_4(SH3_MMUAA | a) &
65 		    (SH3_MMUAA_D_VPN_MASK_4K | SH3_MMUAA_D_ASID_MASK)) == d) {
66 			_reg_write_4(SH3_MMUAA | a, 0);
67 			break;
68 		}
69 	}
70 }
71 
72 void
sh3_tlb_invalidate_asid(int asid)73 sh3_tlb_invalidate_asid(int asid)
74 {
75 	uint32_t aw, a;
76 	int e, w;
77 
78 	/* Invalidate entry attribute to ASID */
79 	for (w = 0; w < SH3_MMU_WAY; w++) {
80 		aw = (w << SH3_MMU_WAY_SHIFT);
81 		for (e = 0; e < SH3_MMU_ENTRY; e++) {
82 			a = aw | (e << SH3_MMU_VPN_SHIFT);
83 			if ((_reg_read_4(SH3_MMUAA | a) &
84 			    SH3_MMUAA_D_ASID_MASK) == asid) {
85 				_reg_write_4(SH3_MMUAA | a, 0);
86 			}
87 		}
88 	}
89 }
90 
91 void
sh3_tlb_invalidate_all(void)92 sh3_tlb_invalidate_all(void)
93 {
94 	uint32_t aw, a;
95 	int e, w;
96 
97 	/* Zero clear all TLB entry to avoid unexpected VPN match. */
98 	for (w = 0; w < SH3_MMU_WAY; w++) {
99 		aw = (w << SH3_MMU_WAY_SHIFT);
100 		for (e = 0; e < SH3_MMU_ENTRY; e++) {
101 			a = aw | (e << SH3_MMU_VPN_SHIFT);
102 			_reg_write_4(SH3_MMUAA | a, 0);
103 			_reg_write_4(SH3_MMUDA | a, 0);
104 		}
105 	}
106 }
107 
/*
 * sh3_tlb_update:
 *	Install a new TLB entry mapping `va' to the page described by
 *	`pte' in address space `asid'.  Any stale entry for the same
 *	(asid, va) pair is invalidated first, then the entry is loaded
 *	through PTEH/PTEL with the `ldtlb' instruction.
 */
void
sh3_tlb_update(int asid, vaddr_t va, uint32_t pte)
{
	uint32_t oasid;

	/* ASID must fit in 8 bits; PTE must carry a PFN; VA non-zero. */
	KDASSERT(asid < 0x100 && (pte & ~PGOFSET) != 0 && va != 0);

	/* Save old ASID */
	oasid = _reg_read_4(SH3_PTEH) & SH3_PTEH_ASID_MASK;

	/* Invalidate old entry (if any) */
	sh3_tlb_invalidate_addr(asid, va);

	/*
	 * Load new entry: VPN + target ASID into PTEH, hardware PTE
	 * bits into PTEL; `ldtlb' then records them in the TLB.
	 */
	_reg_write_4(SH3_PTEH, (va & ~PGOFSET) | asid);
	_reg_write_4(SH3_PTEL, pte & PG_HW_BITS);
	__asm volatile("ldtlb");

	/* Restore old ASID (PTEH VPN bits are left cleared, as before) */
	if (asid != oasid)
		_reg_write_4(SH3_PTEH, oasid);
}
130