xref: /freebsd-src/sys/dev/mlx5/mlx5_core/mlx5_uar.c (revision 3311ff84eac3b7e82f28e331df0586036c6d361c)
1 /*-
2  * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/io-mapping.h>
31 #include <dev/mlx5/driver.h>
32 #include "mlx5_core.h"
33 
enum {
	/* Number of UAR pages the core driver reserves for its own use. */
	NUM_DRIVER_UARS		= 4,
	/*
	 * Number of uuars (blue-flame registers) set aside for low-latency
	 * use; see need_uuar_lock() and mlx5_alloc_uuars() below.
	 */
	NUM_LOW_LAT_UUARS	= 4,
};
38 
39 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
40 {
41 	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)];
42 	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)];
43 	int err;
44 
45 	memset(in, 0, sizeof(in));
46 
47 	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
48 
49 	memset(out, 0, sizeof(out));
50 	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
51 	if (err)
52 		return err;
53 
54 	*uarn = MLX5_GET(alloc_uar_out, out, uar);
55 
56 	return 0;
57 }
58 EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
59 
60 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
61 {
62 	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)];
63 	u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)];
64 
65 	memset(in, 0, sizeof(in));
66 
67 	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
68 	MLX5_SET(dealloc_uar_in, in, uar, uarn);
69 
70 	memset(out, 0, sizeof(out));
71 	return mlx5_cmd_exec_check_status(dev, in,  sizeof(in),
72 					       out, sizeof(out));
73 }
74 EXPORT_SYMBOL(mlx5_cmd_free_uar);
75 
76 static int need_uuar_lock(int uuarn)
77 {
78 	int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
79 
80 	if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS)
81 		return 0;
82 
83 	return 1;
84 }
85 
86 int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
87 {
88 	int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
89 	struct mlx5_bf *bf;
90 	phys_addr_t addr;
91 	int err;
92 	int i;
93 
94 	uuari->num_uars = NUM_DRIVER_UARS;
95 	uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS;
96 
97 	mutex_init(&uuari->lock);
98 	uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL);
99 
100 	uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL);
101 
102 	uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap),
103 				GFP_KERNEL);
104 
105 	uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL);
106 
107 	for (i = 0; i < uuari->num_uars; i++) {
108 		err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index);
109 		if (err)
110 			goto out_count;
111 
112 		addr = pci_resource_start(dev->pdev, 0) +
113 		       ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT);
114 		uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
115 		if (!uuari->uars[i].map) {
116 			mlx5_cmd_free_uar(dev, uuari->uars[i].index);
117 			err = -ENOMEM;
118 			goto out_count;
119 		}
120 		mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
121 			      uuari->uars[i].index, uuari->uars[i].map);
122 	}
123 
124 	for (i = 0; i < tot_uuars; i++) {
125 		bf = &uuari->bfs[i];
126 
127 		bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
128 		bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
129 		bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
130 		bf->reg = NULL; /* Add WC support */
131 		bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
132 			     (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
133 			     MLX5_BF_OFFSET;
134 		bf->need_lock = need_uuar_lock(i);
135 		spin_lock_init(&bf->lock);
136 		spin_lock_init(&bf->lock32);
137 		bf->uuarn = i;
138 	}
139 
140 	return 0;
141 
142 out_count:
143 	for (i--; i >= 0; i--) {
144 		iounmap(uuari->uars[i].map);
145 		mlx5_cmd_free_uar(dev, uuari->uars[i].index);
146 	}
147 	kfree(uuari->count);
148 
149 	kfree(uuari->bitmap);
150 
151 	kfree(uuari->bfs);
152 
153 	kfree(uuari->uars);
154 	return err;
155 }
156 
157 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
158 {
159 	int i = uuari->num_uars;
160 
161 	for (i--; i >= 0; i--) {
162 		iounmap(uuari->uars[i].map);
163 		mlx5_cmd_free_uar(dev, uuari->uars[i].index);
164 	}
165 
166 	kfree(uuari->count);
167 	kfree(uuari->bitmap);
168 	kfree(uuari->bfs);
169 	kfree(uuari->uars);
170 
171 	return 0;
172 }
173 
174 int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
175 {
176 	phys_addr_t pfn;
177 	phys_addr_t uar_bar_start;
178 	int err;
179 
180 	err = mlx5_cmd_alloc_uar(mdev, &uar->index);
181 	if (err) {
182 		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
183 		return err;
184 	}
185 
186 	uar_bar_start = pci_resource_start(mdev->pdev, 0);
187 	pfn           = (uar_bar_start >> PAGE_SHIFT) + uar->index;
188 	uar->map      = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
189 	if (!uar->map) {
190 		mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
191 		err = -ENOMEM;
192 		goto err_free_uar;
193 	}
194 
195 	if (mdev->priv.bf_mapping)
196 		uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
197 						uar->index << PAGE_SHIFT);
198 
199 	return 0;
200 
201 err_free_uar:
202 	mlx5_cmd_free_uar(mdev, uar->index);
203 
204 	return err;
205 }
206 EXPORT_SYMBOL(mlx5_alloc_map_uar);
207 
/*
 * Undo mlx5_alloc_map_uar(): drop the write-combining mapping, unmap
 * the UAR page, and return the UAR index to firmware — in the reverse
 * order of setup.
 */
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
{
	io_mapping_unmap(uar->bf_map);
	iounmap(uar->map);
	mlx5_cmd_free_uar(mdev, uar->index);
}
EXPORT_SYMBOL(mlx5_unmap_free_uar);
215