xref: /onnv-gate/usr/src/uts/sun4v/os/intrq.c (revision 9337:1f7afb6a061f)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
250Sstevel@tonic-gate 
#include <sys/machsystm.h>
#include <sys/cpu.h>
#include <sys/intreg.h>
#include <sys/machcpuvar.h>
#include <vm/hat_sfmmu.h>
#include <sys/error.h>
#include <sys/hypervisor_api.h>
330Sstevel@tonic-gate 
340Sstevel@tonic-gate void
cpu_intrq_register(struct cpu * cpu)350Sstevel@tonic-gate cpu_intrq_register(struct cpu *cpu)
360Sstevel@tonic-gate {
370Sstevel@tonic-gate 	struct machcpu *mcpup = &cpu->cpu_m;
380Sstevel@tonic-gate 	uint64_t ret;
390Sstevel@tonic-gate 
400Sstevel@tonic-gate 	ret = hv_cpu_qconf(INTR_CPU_Q, mcpup->cpu_q_base_pa, cpu_q_entries);
410Sstevel@tonic-gate 	if (ret != H_EOK)
420Sstevel@tonic-gate 		cmn_err(CE_PANIC, "cpu%d: cpu_mondo queue configuration "
430Sstevel@tonic-gate 		    "failed, error %lu", cpu->cpu_id, ret);
440Sstevel@tonic-gate 
450Sstevel@tonic-gate 	ret = hv_cpu_qconf(INTR_DEV_Q, mcpup->dev_q_base_pa, dev_q_entries);
460Sstevel@tonic-gate 	if (ret != H_EOK)
470Sstevel@tonic-gate 		cmn_err(CE_PANIC, "cpu%d: dev_mondo queue configuration "
480Sstevel@tonic-gate 		    "failed, error %lu", cpu->cpu_id, ret);
490Sstevel@tonic-gate 
501991Sheppo 	ret = hv_cpu_qconf(CPU_RQ, mcpup->cpu_rq_base_pa, cpu_rq_entries);
510Sstevel@tonic-gate 	if (ret != H_EOK)
520Sstevel@tonic-gate 		cmn_err(CE_PANIC, "cpu%d: resumable error queue configuration "
530Sstevel@tonic-gate 		    "failed, error %lu", cpu->cpu_id, ret);
540Sstevel@tonic-gate 
551991Sheppo 	ret = hv_cpu_qconf(CPU_NRQ, mcpup->cpu_nrq_base_pa, cpu_nrq_entries);
560Sstevel@tonic-gate 	if (ret != H_EOK)
570Sstevel@tonic-gate 		cmn_err(CE_PANIC, "cpu%d: non-resumable error queue "
58*9337SAnthony.Yznaga@Sun.COM 		    "configuration failed, error %lu", cpu->cpu_id, ret);
590Sstevel@tonic-gate }
600Sstevel@tonic-gate 
614050Sjb145095 int
cpu_intrq_setup(struct cpu * cpu)620Sstevel@tonic-gate cpu_intrq_setup(struct cpu *cpu)
630Sstevel@tonic-gate {
640Sstevel@tonic-gate 	struct machcpu *mcpup = &cpu->cpu_m;
654050Sjb145095 	size_t size;
664050Sjb145095 
674050Sjb145095 	/*
684050Sjb145095 	 * This routine will return with an error return if any
694050Sjb145095 	 * contig_mem_alloc() fails.  It is expected that the caller will
704050Sjb145095 	 * call cpu_intrq_cleanup() (or cleanup_cpu_common() which will).
714050Sjb145095 	 * That will cleanly free only those blocks that were alloc'd.
724050Sjb145095 	 */
730Sstevel@tonic-gate 
740Sstevel@tonic-gate 	/*
750Sstevel@tonic-gate 	 * Allocate mondo data for xcalls.
760Sstevel@tonic-gate 	 */
770Sstevel@tonic-gate 	mcpup->mondo_data = contig_mem_alloc(INTR_REPORT_SIZE);
780Sstevel@tonic-gate 
794050Sjb145095 	if (mcpup->mondo_data == NULL) {
804050Sjb145095 		cmn_err(CE_NOTE, "cpu%d: cpu mondo_data allocation failed",
81*9337SAnthony.Yznaga@Sun.COM 		    cpu->cpu_id);
824050Sjb145095 		return (ENOMEM);
834050Sjb145095 	}
840Sstevel@tonic-gate 	/*
850Sstevel@tonic-gate 	 * va_to_pa() is too expensive to call for every crosscall
860Sstevel@tonic-gate 	 * so we do it here at init time and save it in machcpu.
870Sstevel@tonic-gate 	 */
880Sstevel@tonic-gate 	mcpup->mondo_data_ra = va_to_pa(mcpup->mondo_data);
890Sstevel@tonic-gate 
900Sstevel@tonic-gate 	/*
91*9337SAnthony.Yznaga@Sun.COM 	 *  Allocate a per-cpu list of ncpu_guest_max for xcalls
924050Sjb145095 	 */
93*9337SAnthony.Yznaga@Sun.COM 	size = ncpu_guest_max * sizeof (uint16_t);
944050Sjb145095 	if (size < INTR_REPORT_SIZE)
954050Sjb145095 		size = INTR_REPORT_SIZE;
964050Sjb145095 
97*9337SAnthony.Yznaga@Sun.COM 	/*
98*9337SAnthony.Yznaga@Sun.COM 	 * contig_mem_alloc() requires size to be a power of 2.
99*9337SAnthony.Yznaga@Sun.COM 	 * Increase size to a power of 2 if necessary.
100*9337SAnthony.Yznaga@Sun.COM 	 */
101*9337SAnthony.Yznaga@Sun.COM 	if ((size & (size - 1)) != 0) {
102*9337SAnthony.Yznaga@Sun.COM 		size = 1 << highbit(size);
103*9337SAnthony.Yznaga@Sun.COM 	}
104*9337SAnthony.Yznaga@Sun.COM 
1054050Sjb145095 	mcpup->cpu_list = contig_mem_alloc(size);
1064050Sjb145095 
1074050Sjb145095 	if (mcpup->cpu_list == NULL) {
1084050Sjb145095 		cmn_err(CE_NOTE, "cpu%d: cpu cpu_list allocation failed",
109*9337SAnthony.Yznaga@Sun.COM 		    cpu->cpu_id);
1104050Sjb145095 		return (ENOMEM);
1114050Sjb145095 	}
1124050Sjb145095 	mcpup->cpu_list_ra = va_to_pa(mcpup->cpu_list);
1134050Sjb145095 
1144050Sjb145095 	/*
1150Sstevel@tonic-gate 	 * Allocate sun4v interrupt and error queues.
1160Sstevel@tonic-gate 	 */
1174050Sjb145095 	size = cpu_q_entries * INTR_REPORT_SIZE;
1184050Sjb145095 
1194050Sjb145095 	mcpup->cpu_q_va = contig_mem_alloc(size);
1204050Sjb145095 
1214050Sjb145095 	if (mcpup->cpu_q_va == NULL) {
1224050Sjb145095 		cmn_err(CE_NOTE, "cpu%d: cpu intrq allocation failed",
123*9337SAnthony.Yznaga@Sun.COM 		    cpu->cpu_id);
1244050Sjb145095 		return (ENOMEM);
1254050Sjb145095 	}
1260Sstevel@tonic-gate 	mcpup->cpu_q_base_pa = va_to_pa(mcpup->cpu_q_va);
1274050Sjb145095 	mcpup->cpu_q_size = size;
1280Sstevel@tonic-gate 
1294050Sjb145095 	/*
1304050Sjb145095 	 * Allocate device queues
1314050Sjb145095 	 */
1324050Sjb145095 	size = dev_q_entries * INTR_REPORT_SIZE;
1334050Sjb145095 
1344050Sjb145095 	mcpup->dev_q_va = contig_mem_alloc(size);
1354050Sjb145095 
1364050Sjb145095 	if (mcpup->dev_q_va == NULL) {
1374050Sjb145095 		cmn_err(CE_NOTE, "cpu%d: dev intrq allocation failed",
138*9337SAnthony.Yznaga@Sun.COM 		    cpu->cpu_id);
1394050Sjb145095 		return (ENOMEM);
1404050Sjb145095 	}
1410Sstevel@tonic-gate 	mcpup->dev_q_base_pa = va_to_pa(mcpup->dev_q_va);
1424050Sjb145095 	mcpup->dev_q_size = size;
1430Sstevel@tonic-gate 
1444050Sjb145095 	/*
1454050Sjb145095 	 * Allocate resumable queue and its kernel buffer
1464050Sjb145095 	 */
1474050Sjb145095 	size = cpu_rq_entries * Q_ENTRY_SIZE;
1484050Sjb145095 
1494050Sjb145095 	mcpup->cpu_rq_va = contig_mem_alloc(2 * size);
1504050Sjb145095 
1514050Sjb145095 	if (mcpup->cpu_rq_va == NULL) {
1524050Sjb145095 		cmn_err(CE_NOTE, "cpu%d: resumable queue allocation failed",
153*9337SAnthony.Yznaga@Sun.COM 		    cpu->cpu_id);
1544050Sjb145095 		return (ENOMEM);
1554050Sjb145095 	}
1560Sstevel@tonic-gate 	mcpup->cpu_rq_base_pa = va_to_pa(mcpup->cpu_rq_va);
1574050Sjb145095 	mcpup->cpu_rq_size = size;
1580Sstevel@tonic-gate 	/* zero out the memory */
1594050Sjb145095 	bzero(mcpup->cpu_rq_va, 2 * size);
1600Sstevel@tonic-gate 
1614050Sjb145095 	/*
1624050Sjb145095 	 * Allocate non-resumable queues
1634050Sjb145095 	 */
1644050Sjb145095 	size = cpu_nrq_entries * Q_ENTRY_SIZE;
1654050Sjb145095 
1664050Sjb145095 	mcpup->cpu_nrq_va = contig_mem_alloc(2 * size);
1674050Sjb145095 
1684050Sjb145095 	if (mcpup->cpu_nrq_va == NULL) {
1694050Sjb145095 		cmn_err(CE_NOTE, "cpu%d: nonresumable queue allocation failed",
170*9337SAnthony.Yznaga@Sun.COM 		    cpu->cpu_id);
1714050Sjb145095 		return (ENOMEM);
1724050Sjb145095 	}
1730Sstevel@tonic-gate 	mcpup->cpu_nrq_base_pa = va_to_pa(mcpup->cpu_nrq_va);
1744050Sjb145095 	mcpup->cpu_nrq_size = size;
1750Sstevel@tonic-gate 	/* zero out the memory */
1764050Sjb145095 	bzero(mcpup->cpu_nrq_va, 2 * size);
1774050Sjb145095 
1784050Sjb145095 	return (0);
1791991Sheppo }
1801991Sheppo 
1811991Sheppo void
cpu_intrq_cleanup(struct cpu * cpu)1821991Sheppo cpu_intrq_cleanup(struct cpu *cpu)
1831991Sheppo {
1841991Sheppo 	struct machcpu *mcpup = &cpu->cpu_m;
1851991Sheppo 	int cpu_list_size;
1861991Sheppo 	uint64_t cpu_q_size;
1871991Sheppo 	uint64_t dev_q_size;
1881991Sheppo 	uint64_t cpu_rq_size;
1891991Sheppo 	uint64_t cpu_nrq_size;
1901991Sheppo 
1911991Sheppo 	/*
1921991Sheppo 	 * Free mondo data for xcalls.
1931991Sheppo 	 */
1941991Sheppo 	if (mcpup->mondo_data) {
1951991Sheppo 		contig_mem_free(mcpup->mondo_data, INTR_REPORT_SIZE);
1961991Sheppo 		mcpup->mondo_data = NULL;
1971991Sheppo 		mcpup->mondo_data_ra = NULL;
1981991Sheppo 	}
1991991Sheppo 
2001991Sheppo 	/*
201*9337SAnthony.Yznaga@Sun.COM 	 *  Free per-cpu list of ncpu_guest_max for xcalls
2021991Sheppo 	 */
203*9337SAnthony.Yznaga@Sun.COM 	cpu_list_size = ncpu_guest_max * sizeof (uint16_t);
2041991Sheppo 	if (cpu_list_size < INTR_REPORT_SIZE)
2051991Sheppo 		cpu_list_size = INTR_REPORT_SIZE;
2061991Sheppo 
207*9337SAnthony.Yznaga@Sun.COM 	/*
208*9337SAnthony.Yznaga@Sun.COM 	 * contig_mem_alloc() requires size to be a power of 2.
209*9337SAnthony.Yznaga@Sun.COM 	 * Increase size to a power of 2 if necessary.
210*9337SAnthony.Yznaga@Sun.COM 	 */
211*9337SAnthony.Yznaga@Sun.COM 	if ((cpu_list_size & (cpu_list_size - 1)) != 0) {
212*9337SAnthony.Yznaga@Sun.COM 		cpu_list_size = 1 << highbit(cpu_list_size);
213*9337SAnthony.Yznaga@Sun.COM 	}
214*9337SAnthony.Yznaga@Sun.COM 
2151991Sheppo 	if (mcpup->cpu_list) {
2161991Sheppo 		contig_mem_free(mcpup->cpu_list, cpu_list_size);
2171991Sheppo 		mcpup->cpu_list = NULL;
2181991Sheppo 		mcpup->cpu_list_ra = NULL;
2191991Sheppo 	}
2200Sstevel@tonic-gate 
2211991Sheppo 	/*
2221991Sheppo 	 * Free sun4v interrupt and error queues.
2231991Sheppo 	 */
2241991Sheppo 	if (mcpup->cpu_q_va) {
2251991Sheppo 		cpu_q_size = cpu_q_entries * INTR_REPORT_SIZE;
2261991Sheppo 		contig_mem_free(mcpup->cpu_q_va, cpu_q_size);
2271991Sheppo 		mcpup->cpu_q_va = NULL;
2281991Sheppo 		mcpup->cpu_q_base_pa = NULL;
2291991Sheppo 		mcpup->cpu_q_size = 0;
2301991Sheppo 	}
2311991Sheppo 
2321991Sheppo 	if (mcpup->dev_q_va) {
2331991Sheppo 		dev_q_size = dev_q_entries * INTR_REPORT_SIZE;
2341991Sheppo 		contig_mem_free(mcpup->dev_q_va, dev_q_size);
2351991Sheppo 		mcpup->dev_q_va = NULL;
2361991Sheppo 		mcpup->dev_q_base_pa = NULL;
2371991Sheppo 		mcpup->dev_q_size = 0;
2381991Sheppo 	}
2391991Sheppo 
2401991Sheppo 	if (mcpup->cpu_rq_va) {
2411991Sheppo 		cpu_rq_size = cpu_rq_entries * Q_ENTRY_SIZE;
2421991Sheppo 		contig_mem_free(mcpup->cpu_rq_va, 2 * cpu_rq_size);
2431991Sheppo 		mcpup->cpu_rq_va = NULL;
2441991Sheppo 		mcpup->cpu_rq_base_pa = NULL;
2451991Sheppo 		mcpup->cpu_rq_size = 0;
2461991Sheppo 	}
2471991Sheppo 
2481991Sheppo 	if (mcpup->cpu_nrq_va) {
2491991Sheppo 		cpu_nrq_size = cpu_nrq_entries * Q_ENTRY_SIZE;
2501991Sheppo 		contig_mem_free(mcpup->cpu_nrq_va, 2 * cpu_nrq_size);
2511991Sheppo 		mcpup->cpu_nrq_va = NULL;
2521991Sheppo 		mcpup->cpu_nrq_base_pa = NULL;
2531991Sheppo 		mcpup->cpu_nrq_size = 0;
2541991Sheppo 	}
2550Sstevel@tonic-gate }
256