/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <dev/mlx5/driver.h>
#include "mlx5_core.h"

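/*
 * Under LinuxKPI, PAGE_MASK is ~(PAGE_SIZE - 1); make sure that
 * definition is in effect here rather than FreeBSD's native
 * (PAGE_SIZE - 1).
 */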
CTASSERT((uintptr_t)PAGE_MASK > (uintptr_t)PAGE_SIZE);

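/*
 * A deferred page give/reclaim request; allocated by
 * mlx5_core_req_pages_handler() and serviced by pages_work_handler().
 */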
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	s32	npages;
	struct work_struct work;
};

enum {
	MAX_RECLAIM_TIME_MSECS	= 5000,
};

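/*
 * Busdma load callback for a single firmware page.  Records the DMA
 * address and load status on the page and signals completion through
 * MLX5_DMA_DONE(), taking the DMA lock unless the caller already owns it.
 */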
static void
mlx5_fwp_load_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mlx5_fw_page *fwp;
	uint8_t owned;

	fwp = (struct mlx5_fw_page *)arg;
	owned = MLX5_DMA_OWNED(fwp->dev);

	if (!owned)
		MLX5_DMA_LOCK(fwp->dev);

	if (error == 0) {
		KASSERT(nseg == 1, ("Number of segments is different from 1"));
		fwp->dma_addr = segs->ds_addr;
		fwp->load_done = MLX5_LOAD_ST_SUCCESS;
	} else {
		fwp->load_done = MLX5_LOAD_ST_FAILURE;
	}
	MLX5_DMA_DONE(fwp->dev);

	if (!owned)
		MLX5_DMA_UNLOCK(fwp->dev);
}

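/* Flush pending CPU writes on all pages in the array out to the device. */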
void
mlx5_fwp_flush(struct mlx5_fw_page *fwp)
{
	unsigned num = fwp->numpages;

	while (num--)
		bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_PREWRITE);
}

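/*
 * Discard stale cached data for all pages in the array so that
 * subsequent CPU reads observe the device's writes.
 */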
void
mlx5_fwp_invalidate(struct mlx5_fw_page *fwp)
{
	unsigned num = fwp->numpages;

	while (num--) {
		bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_PREREAD);
	}
}

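/*
 * Allocate an array of "num" firmware pages and DMA-map each of them.
 * With num == 0 only the header structure is allocated.  This function
 * requires a sleeping context and returns NULL on failure.
 */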
struct mlx5_fw_page *
mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags, unsigned num)
{
	struct mlx5_fw_page *fwp;
	unsigned x;
	int err;

	/* check for special case */
	if (num == 0) {
		fwp = kzalloc(sizeof(*fwp), flags);
		if (fwp != NULL)
			fwp->dev = dev;
		return (fwp);
	}

	/* we need sleeping context for this function */
	if (flags & M_NOWAIT)
		return (NULL);

	fwp = kzalloc(sizeof(*fwp) * num, flags);
	if (fwp == NULL)
		return (NULL);

	/* serialize loading the DMA map(s) */
	sx_xlock(&dev->cmd.dma_sx);

	for (x = 0; x != num; x++) {
		/* store pointer to MLX5 core device */
		fwp[x].dev = dev;
		/* store number of pages left from the array */
		fwp[x].numpages = num - x;

		/* allocate memory */
		err = bus_dmamem_alloc(dev->cmd.dma_tag, &fwp[x].virt_addr,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &fwp[x].dma_map);
		if (err != 0)
			goto failure;

		/* load memory into DMA */
		MLX5_DMA_LOCK(dev);
		err = bus_dmamap_load(
		    dev->cmd.dma_tag, fwp[x].dma_map, fwp[x].virt_addr,
		    MLX5_ADAPTER_PAGE_SIZE, &mlx5_fwp_load_mem_cb,
		    fwp + x, BUS_DMA_WAITOK | BUS_DMA_COHERENT);

		/* wait for the load callback to update the status */
		while (fwp[x].load_done == MLX5_LOAD_ST_NONE)
			MLX5_DMA_WAIT(dev);
		MLX5_DMA_UNLOCK(dev);

		/* check for error */
		if (fwp[x].load_done != MLX5_LOAD_ST_SUCCESS) {
			bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr,
			    fwp[x].dma_map);
			goto failure;
		}
	}
	sx_xunlock(&dev->cmd.dma_sx);
	return (fwp);

failure:
	/* unwind all pages mapped so far */
	while (x--) {
		bus_dmamap_unload(dev->cmd.dma_tag, fwp[x].dma_map);
		bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr, fwp[x].dma_map);
	}
	sx_xunlock(&dev->cmd.dma_sx);
	/* free the page array itself, else it is leaked */
	kfree(fwp);
	return (NULL);
}

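/* Unload, unmap and free a page array obtained from mlx5_fwp_alloc(). */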
void
mlx5_fwp_free(struct mlx5_fw_page *fwp)
{
	struct mlx5_core_dev *dev;
	unsigned num;

	/* be NULL safe */
	if (fwp == NULL)
		return;

	/* check for special case */
	if (fwp->numpages == 0) {
		kfree(fwp);
		return;
	}

	num = fwp->numpages;
	dev = fwp->dev;

	while (num--) {
		bus_dmamap_unload(dev->cmd.dma_tag, fwp[num].dma_map);
		bus_dmamem_free(dev->cmd.dma_tag, fwp[num].virt_addr, fwp[num].dma_map);
	}

	kfree(fwp);
}

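/* Translate a byte offset within the page array into a DMA (bus) address. */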
u64
mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset)
{
	size_t index = (offset / MLX5_ADAPTER_PAGE_SIZE);
	KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset));

	return ((fwp + index)->dma_addr + (offset % MLX5_ADAPTER_PAGE_SIZE));
}

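/* Translate a byte offset within the page array into a kernel virtual address. */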
void *
mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset)
{
	size_t index = (offset / MLX5_ADAPTER_PAGE_SIZE);
	KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset));

	return ((char *)(fwp + index)->virt_addr + (offset % MLX5_ADAPTER_PAGE_SIZE));
}

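/*
 * Link a firmware page into the DMA-address keyed red-black tree.  The
 * tree is kept in descending address order, matching the lookup in
 * mlx5_remove_fw_page_locked().  Returns -EEXIST for a duplicate
 * address.  The caller must hold the DMA lock.
 */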
static int
mlx5_insert_fw_page_locked(struct mlx5_core_dev *dev, struct mlx5_fw_page *nfp)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct mlx5_fw_page *tfp;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct mlx5_fw_page, rb_node);
		if (tfp->dma_addr < nfp->dma_addr)
			new = &parent->rb_left;
		else if (tfp->dma_addr > nfp->dma_addr)
			new = &parent->rb_right;
		else
			return (-EEXIST);
	}

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	return (0);
}

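/*
 * Find a firmware page by DMA address and unlink it from the tree.
 * Returns NULL when the address is not found.  The caller must hold
 * the DMA lock.
 */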
static struct mlx5_fw_page *
mlx5_remove_fw_page_locked(struct mlx5_core_dev *dev, bus_addr_t addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct mlx5_fw_page *result = NULL;
	struct mlx5_fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct mlx5_fw_page, rb_node);
		if (tfp->dma_addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->dma_addr > addr) {
			tmp = tmp->rb_right;
		} else {
			rb_erase(&tfp->rb_node, &dev->priv.page_root);
			result = tfp;
			break;
		}
	}
	return (result);
}

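/*
 * Allocate a single 4K firmware page for "func_id", insert it into the
 * page tree and return its DMA address through "addr".
 */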
static int
alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
{
	struct mlx5_fw_page *fwp;
	int err;

	fwp = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (fwp == NULL)
		return (-ENOMEM);

	fwp->func_id = func_id;

	MLX5_DMA_LOCK(dev);
	err = mlx5_insert_fw_page_locked(dev, fwp);
	MLX5_DMA_UNLOCK(dev);

	if (err != 0) {
		mlx5_fwp_free(fwp);
	} else {
		/* make sure cached data is cleaned */
		mlx5_fwp_invalidate(fwp);

		/* store DMA address */
		*addr = fwp->dma_addr;
	}
	return (err);
}

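/* Remove the 4K firmware page at "addr" from the page tree and free it. */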
static void
free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct mlx5_fw_page *fwp;

	MLX5_DMA_LOCK(dev);
	fwp = mlx5_remove_fw_page_locked(dev, addr);
	MLX5_DMA_UNLOCK(dev);

	if (fwp == NULL) {
		mlx5_core_warn(dev, "Cannot free 4K page at 0x%llx\n", (long long)addr);
		return;
	}
	mlx5_fwp_free(fwp);
}

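/*
 * Ask the firmware how many boot or init pages it needs and which
 * function ID the request is for.
 */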
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return 0;
}

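/*
 * Allocate "npages" 4K pages and hand them to the firmware using a
 * MANAGE_PAGES(GIVE) command.  On failure all pages are released again
 * and, when "notify_fail" is set, the firmware is informed that no
 * pages can be supplied.
 */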
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in, *nin;
	int i = 0;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		err = -ENOMEM;
		goto out_alloc;
	}

	for (i = 0; i < npages; i++) {
		err = alloc_4k(dev, &addr, func_id);
		if (err)
			goto out_alloc;
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_alloc;
	}
	dev->priv.fw_pages += npages;
	dev->priv.pages_per_func[func_id] += npages;

	mlx5_core_dbg(dev, "err %d\n", err);

	goto out_free;

out_alloc:
	if (notify_fail) {
		nin = mlx5_vzalloc(inlen);
		if (!nin)
			goto out_4k;

		memset(&out, 0, sizeof(out));
		MLX5_SET(manage_pages_in, nin, opcode, MLX5_CMD_OP_MANAGE_PAGES);
		MLX5_SET(manage_pages_in, nin, op_mod, MLX5_PAGES_CANT_GIVE);
		MLX5_SET(manage_pages_in, nin, function_id, func_id);
		if (mlx5_cmd_exec(dev, nin, inlen, out, sizeof(out)))
			mlx5_core_warn(dev, "page notify failed\n");
		kvfree(nin);
	}

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
	kvfree(in);
	return err;
}

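/*
 * Execute a MANAGE_PAGES(TAKE) command.  When the device is in the
 * internal error state the firmware cannot respond, so the output page
 * list is synthesized locally from the page tree instead.
 */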
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct mlx5_fw_page *fwp;
	struct rb_node *p;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);

	p = rb_first(&dev->priv.page_root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct mlx5_fw_page, rb_node);
		p = rb_next(p);
		if (fwp->func_id != func_id)
			continue;

		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->dma_addr);
		i++;
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

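/*
 * Reclaim up to "npages" pages from the firmware for "func_id" and
 * free them.  The number of pages actually returned is stored in
 * "*nclaimed" when the pointer is non-NULL.
 */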
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);

	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	dev->priv.pages_per_func[func_id] -= num_claimed;
	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));

out_free:
	kvfree(out);
	return err;
}

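/* Service a queued page request: negative counts reclaim, positive give. */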
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

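/*
 * Entry point for firmware page request events.  This may be called
 * from a context that cannot sleep, hence the GFP_ATOMIC allocation
 * and the deferral to a workqueue.
 */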
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	if (!queue_work(dev->priv.pg_wq, &req->work))
		mlx5_core_warn(dev, "failed to queue pages handler work\n");
}

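/*
 * Query how many boot or init pages the firmware wants and give them
 * to it synchronously during device startup.
 */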
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

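/*
 * Wait for the firmware to return all pages given out on behalf of
 * VFs, extending the deadline as long as progress is being made.
 * Returns the negated number of pages still outstanding, i.e. zero on
 * success.
 */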
s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev)
{
	int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	s64 prevpages = 0;
	s64 npages = 0;

	while (!time_after(jiffies, end)) {
		/* exclude own function, VFs only */
		npages = dev->priv.fw_pages - dev->priv.pages_per_func[0];
		if (!npages)
			break;

		if (npages != prevpages)
			end = end + msecs_to_jiffies(100);

		prevpages = npages;
		msleep(1);
	}

	if (npages)
		mlx5_core_warn(dev, "FW did not return all VF pages; this will cause a memory leak\n");

	return -npages;
}

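/*
 * Compute how many page addresses fit into a single reclaim command
 * reply spanning MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks.
 */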
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

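/*
 * Reclaim all firmware pages at teardown, one batch at a time, giving
 * up when no progress is made for MAX_RECLAIM_TIME_MSECS.
 */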
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct mlx5_fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct mlx5_fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(),
					    &nclaimed);
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}

			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages, giving up...\n");
			break;
		}
	} while (p);

	return 0;
}

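/*
 * Page allocator lifecycle: init/cleanup set up and tear down the page
 * tree; start/stop manage the single-threaded workqueue that services
 * page requests.
 */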
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{

	dev->priv.page_root = RB_ROOT;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}