1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * PX mmu initialization and configuration
27 */
28 #include <sys/types.h>
29 #include <sys/kmem.h>
30 #include <sys/async.h>
31 #include <sys/sysmacros.h>
32 #include <sys/sunddi.h>
33 #include <sys/ddi_impldefs.h>
34 #include <sys/vmem.h>
35 #include <sys/machsystm.h> /* lddphys() */
36 #include <sys/iommutsb.h>
37 #include "px_obj.h"
38
39 int
px_mmu_attach(px_t * px_p)40 px_mmu_attach(px_t *px_p)
41 {
42 dev_info_t *dip = px_p->px_dip;
43 px_mmu_t *mmu_p;
44 uint32_t tsb_i = 0;
45 char map_name[32];
46 px_dvma_range_prop_t *dvma_prop;
47 int dvma_prop_len;
48 uint32_t cache_size, tsb_entries;
49
50 /*
51 * Allocate mmu state structure and link it to the
52 * px state structure.
53 */
54 mmu_p = kmem_zalloc(sizeof (px_mmu_t), KM_SLEEP);
55 if (mmu_p == NULL)
56 return (DDI_FAILURE);
57
58 px_p->px_mmu_p = mmu_p;
59 mmu_p->mmu_px_p = px_p;
60 mmu_p->mmu_inst = ddi_get_instance(dip);
61
62 /*
63 * Check for "virtual-dma" property that specifies
64 * the DVMA range.
65 */
66 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
67 "virtual-dma", (caddr_t)&dvma_prop, &dvma_prop_len) !=
68 DDI_PROP_SUCCESS) {
69
70 DBG(DBG_ATTACH, dip, "Getting virtual-dma failed\n");
71
72 kmem_free(mmu_p, sizeof (px_mmu_t));
73 px_p->px_mmu_p = NULL;
74
75 return (DDI_FAILURE);
76 }
77
78 mmu_p->mmu_dvma_base = dvma_prop->dvma_base;
79 mmu_p->mmu_dvma_end = dvma_prop->dvma_base +
80 (dvma_prop->dvma_len - 1);
81 tsb_entries = MMU_BTOP(dvma_prop->dvma_len);
82
83 kmem_free(dvma_prop, dvma_prop_len);
84
85 /*
86 * Setup base and bounds for DVMA and bypass mappings.
87 */
88 mmu_p->mmu_dvma_cache_locks =
89 kmem_zalloc(px_dvma_page_cache_entries, KM_SLEEP);
90
91 mmu_p->dvma_base_pg = MMU_BTOP(mmu_p->mmu_dvma_base);
92 mmu_p->mmu_dvma_reserve = tsb_entries >> 1;
93 mmu_p->dvma_end_pg = MMU_BTOP(mmu_p->mmu_dvma_end);
94
95 /*
96 * Create a virtual memory map for dvma address space.
97 * Reserve 'size' bytes of low dvma space for fast track cache.
98 */
99 (void) snprintf(map_name, sizeof (map_name), "%s%d_dvma",
100 ddi_driver_name(dip), ddi_get_instance(dip));
101
102 cache_size = MMU_PTOB(px_dvma_page_cache_entries *
103 px_dvma_page_cache_clustsz);
104 mmu_p->mmu_dvma_fast_end = mmu_p->mmu_dvma_base +
105 cache_size - 1;
106
107 mmu_p->mmu_dvma_map = vmem_create(map_name,
108 (void *)(mmu_p->mmu_dvma_fast_end + 1),
109 MMU_PTOB(tsb_entries) - cache_size, MMU_PAGE_SIZE,
110 NULL, NULL, NULL, MMU_PAGE_SIZE, VM_SLEEP);
111
112 mutex_init(&mmu_p->dvma_debug_lock, NULL, MUTEX_DRIVER, NULL);
113
114 for (tsb_i = 0; tsb_i < tsb_entries; tsb_i++) {
115 r_addr_t ra = 0;
116 io_attributes_t attr;
117 caddr_t va;
118
119 if (px_lib_iommu_getmap(px_p->px_dip, PCI_TSBID(0, tsb_i),
120 &attr, &ra) != DDI_SUCCESS)
121 continue;
122
123 va = (caddr_t)(MMU_PTOB(mmu_p->dvma_base_pg + tsb_i));
124
125 if (va <= (caddr_t)mmu_p->mmu_dvma_fast_end) {
126 uint32_t cache_i;
127
128 /*
129 * the va is within the *fast* dvma range; therefore,
130 * lock its fast dvma page cache cluster in order to
131 * both preserve the TTE and prevent the use of this
132 * fast dvma page cache cluster by px_dvma_map_fast().
133 * the lock value 0xFF comes from ldstub().
134 */
135 cache_i = tsb_i / px_dvma_page_cache_clustsz;
136 ASSERT(cache_i < px_dvma_page_cache_entries);
137 mmu_p->mmu_dvma_cache_locks[cache_i] = 0xFF;
138 } else {
139 (void) vmem_xalloc(mmu_p->mmu_dvma_map, MMU_PAGE_SIZE,
140 MMU_PAGE_SIZE, 0, 0, va, va + MMU_PAGE_SIZE,
141 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
142 }
143 }
144
145 return (DDI_SUCCESS);
146 }
147
148 void
px_mmu_detach(px_t * px_p)149 px_mmu_detach(px_t *px_p)
150 {
151 px_mmu_t *mmu_p = px_p->px_mmu_p;
152
153 (void) px_lib_iommu_detach(px_p);
154
155 /*
156 * Free the dvma resource map.
157 */
158 vmem_destroy(mmu_p->mmu_dvma_map);
159
160 kmem_free(mmu_p->mmu_dvma_cache_locks,
161 px_dvma_page_cache_entries);
162
163 if (PX_DVMA_DBG_ON(mmu_p))
164 px_dvma_debug_fini(mmu_p);
165
166 mutex_destroy(&mmu_p->dvma_debug_lock);
167
168 /*
169 * Free the mmu state structure.
170 */
171 kmem_free(mmu_p, sizeof (px_mmu_t));
172 px_p->px_mmu_p = NULL;
173 }
174
175 int
px_mmu_map_pages(px_mmu_t * mmu_p,ddi_dma_impl_t * mp,px_dvma_addr_t dvma_pg,size_t npages,size_t pfn_index)176 px_mmu_map_pages(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_dvma_addr_t dvma_pg,
177 size_t npages, size_t pfn_index)
178 {
179 dev_info_t *dip = mmu_p->mmu_px_p->px_dip;
180 px_dvma_addr_t pg_index = MMU_PAGE_INDEX(mmu_p, dvma_pg);
181 io_attributes_t attr = PX_GET_MP_TTE(mp->dmai_tte);
182
183 ASSERT(npages <= mp->dmai_ndvmapages);
184 DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages:%x+%x=%x "
185 "npages=0x%x pfn_index=0x%x\n", (uint_t)mmu_p->dvma_base_pg,
186 (uint_t)pg_index, dvma_pg, (uint_t)npages, (uint_t)pfn_index);
187
188 if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages,
189 PX_ADD_ATTR_EXTNS(attr, mp->dmai_bdf), (void *)mp, pfn_index,
190 MMU_MAP_PFN) != DDI_SUCCESS) {
191 DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: "
192 "px_lib_iommu_map failed\n");
193
194 return (DDI_FAILURE);
195 }
196
197 if (!PX_MAP_BUFZONE(mp))
198 goto done;
199
200 DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: redzone pg=%x\n",
201 pg_index + npages);
202
203 ASSERT(PX_HAS_REDZONE(mp));
204
205 if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index + npages), 1,
206 PX_ADD_ATTR_EXTNS(attr, mp->dmai_bdf), (void *)mp,
207 pfn_index + npages - 1, MMU_MAP_PFN) != DDI_SUCCESS) {
208 DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: mapping "
209 "REDZONE page failed\n");
210
211 if (px_lib_iommu_demap(dip, PCI_TSBID(0, pg_index), npages)
212 != DDI_SUCCESS) {
213 DBG(DBG_MAP_WIN, dip, "px_lib_iommu_demap: failed\n");
214 }
215 return (DDI_FAILURE);
216 }
217
218 done:
219 if (PX_DVMA_DBG_ON(mmu_p))
220 px_dvma_alloc_debug(mmu_p, (char *)mp->dmai_mapping,
221 mp->dmai_size, mp);
222
223 return (DDI_SUCCESS);
224 }
225
226 void
px_mmu_unmap_pages(px_mmu_t * mmu_p,ddi_dma_impl_t * mp,px_dvma_addr_t dvma_pg,uint_t npages)227 px_mmu_unmap_pages(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_dvma_addr_t dvma_pg,
228 uint_t npages)
229 {
230 px_dvma_addr_t pg_index = MMU_PAGE_INDEX(mmu_p, dvma_pg);
231
232 DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip,
233 "px_mmu_unmap_pages:%x+%x=%x npages=0x%x\n",
234 (uint_t)mmu_p->dvma_base_pg, (uint_t)pg_index, dvma_pg,
235 (uint_t)npages);
236
237 if (px_lib_iommu_demap(mmu_p->mmu_px_p->px_dip,
238 PCI_TSBID(0, pg_index), npages) != DDI_SUCCESS) {
239 DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip,
240 "px_lib_iommu_demap: failed\n");
241 }
242
243 if (!PX_MAP_BUFZONE(mp))
244 return;
245
246 DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip, "px_mmu_unmap_pages: "
247 "redzone pg=%x\n", pg_index + npages);
248
249 ASSERT(PX_HAS_REDZONE(mp));
250
251 if (px_lib_iommu_demap(mmu_p->mmu_px_p->px_dip,
252 PCI_TSBID(0, pg_index + npages), 1) != DDI_SUCCESS) {
253 DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip,
254 "px_lib_iommu_demap: failed\n");
255 }
256 }
257
258 /*
259 * px_mmu_map_window - map a dvma window into the mmu
260 * used by: px_dma_win(), px_dma_ctlops() - DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
261 * return value: none
262 */
263 /*ARGSUSED*/
264 int
px_mmu_map_window(px_mmu_t * mmu_p,ddi_dma_impl_t * mp,px_window_t win_no)265 px_mmu_map_window(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_window_t win_no)
266 {
267 uint32_t obj_pg0_off = mp->dmai_roffset;
268 uint32_t win_pg0_off = win_no ? 0 : obj_pg0_off;
269 size_t win_size = mp->dmai_winsize;
270 size_t pfn_index = win_size * win_no; /* temp value */
271 size_t obj_off = win_no ? pfn_index - obj_pg0_off : 0; /* xferred sz */
272 px_dvma_addr_t dvma_pg = MMU_BTOP(mp->dmai_mapping);
273 size_t res_size = mp->dmai_object.dmao_size - obj_off + win_pg0_off;
274 int ret = DDI_SUCCESS;
275
276 ASSERT(!(win_size & MMU_PAGE_OFFSET));
277 if (win_no >= mp->dmai_nwin)
278 return (ret);
279 if (res_size < win_size) /* last window */
280 win_size = res_size; /* mp->dmai_winsize unchanged */
281
282 mp->dmai_mapping = MMU_PTOB(dvma_pg) | win_pg0_off;
283 mp->dmai_size = win_size - win_pg0_off; /* cur win xferrable size */
284 mp->dmai_offset = obj_off; /* win offset into object */
285 pfn_index = MMU_BTOP(pfn_index); /* index into pfnlist */
286 ret = px_mmu_map_pages(mmu_p, mp, dvma_pg, MMU_BTOPR(win_size),
287 pfn_index);
288
289 return (ret);
290 }
291
292 /*
293 * px_mmu_unmap_window
294 * This routine is called to break down the mmu mappings to a dvma window.
295 * Non partial mappings are viewed as single window mapping.
296 * used by: px_dma_unbindhdl(), px_dma_window(),
297 * and px_dma_ctlops() - DDI_DMA_FREE, DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
298 * return value: none
299 */
300 /*ARGSUSED*/
301 void
px_mmu_unmap_window(px_mmu_t * mmu_p,ddi_dma_impl_t * mp)302 px_mmu_unmap_window(px_mmu_t *mmu_p, ddi_dma_impl_t *mp)
303 {
304 px_dvma_addr_t dvma_pg = MMU_BTOP(mp->dmai_mapping);
305 uint_t npages = MMU_BTOP(mp->dmai_winsize);
306
307 px_mmu_unmap_pages(mmu_p, mp, dvma_pg, npages);
308
309 if (PX_DVMA_DBG_ON(mmu_p))
310 px_dvma_free_debug(mmu_p, (char *)mp->dmai_mapping,
311 mp->dmai_size, mp);
312 }
313
314
315 #if 0
316 /*
317 * The following table is for reference only. It denotes the
318 * the TSB table size measured in number of 8 byte entries.
319 * It is represented by bits 3:0 in the MMU TSB CTRL REG.
320 */
321 static int px_mmu_tsb_sizes[] = {
322 0x0, /* 1K */
323 0x1, /* 2K */
324 0x2, /* 4K */
325 0x3, /* 8K */
326 0x4, /* 16K */
327 0x5, /* 32K */
328 0x6, /* 64K */
329 0x7, /* 128K */
330 0x8 /* 256K */
331 };
332 #endif
333
/*
 * Human-readable names for MMU error status conditions, indexed by
 * error code.  NOTE(review): not referenced anywhere in this file;
 * presumably intended for px_log_mmu_err() once it is implemented —
 * confirm before removing.
 */
static char *px_mmu_errsts[] = {
	"Protection Error", "Invalid Error", "Timeout", "ECC Error(UE)"
};
337
/*
 * px_log_mmu_err - log MMU error state for the given px instance.
 * Currently a stub that always returns 0.
 */
/*ARGSUSED*/
static int
px_log_mmu_err(px_t *px_p)
{
	/*
	 * Placeholder; the correct error bits need to be logged.
	 */
	return (0);
}
347