xref: /freebsd-src/sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c (revision 95ee2897e98f5d444f26ed2334cc7c439f9c16c6)
112515907SHans Petter Selasky /*-
2b633e08cSHans Petter Selasky  * Copyright (c) 2013-2021, Mellanox Technologies, Ltd.  All rights reserved.
312515907SHans Petter Selasky  *
412515907SHans Petter Selasky  * Redistribution and use in source and binary forms, with or without
512515907SHans Petter Selasky  * modification, are permitted provided that the following conditions
612515907SHans Petter Selasky  * are met:
712515907SHans Petter Selasky  * 1. Redistributions of source code must retain the above copyright
812515907SHans Petter Selasky  *    notice, this list of conditions and the following disclaimer.
912515907SHans Petter Selasky  * 2. Redistributions in binary form must reproduce the above copyright
1012515907SHans Petter Selasky  *    notice, this list of conditions and the following disclaimer in the
1112515907SHans Petter Selasky  *    documentation and/or other materials provided with the distribution.
1212515907SHans Petter Selasky  *
1312515907SHans Petter Selasky  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
1412515907SHans Petter Selasky  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1512515907SHans Petter Selasky  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1612515907SHans Petter Selasky  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
1712515907SHans Petter Selasky  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1812515907SHans Petter Selasky  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
1912515907SHans Petter Selasky  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2012515907SHans Petter Selasky  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2112515907SHans Petter Selasky  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2212515907SHans Petter Selasky  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2312515907SHans Petter Selasky  * SUCH DAMAGE.
2412515907SHans Petter Selasky  */
2512515907SHans Petter Selasky 
2670600979SKonstantin Belousov #include "opt_rss.h"
2770600979SKonstantin Belousov #include "opt_ratelimit.h"
2870600979SKonstantin Belousov 
2912515907SHans Petter Selasky #include <linux/kref.h>
3012515907SHans Petter Selasky #include <linux/random.h>
3112515907SHans Petter Selasky #include <linux/delay.h>
328e6e287fSHans Petter Selasky #include <linux/sched.h>
3312515907SHans Petter Selasky #include <rdma/ib_umem.h>
348e6e287fSHans Petter Selasky #include <rdma/ib_umem_odp.h>
358e6e287fSHans Petter Selasky #include <rdma/ib_verbs.h>
36028130b8SKonstantin Belousov #include <dev/mlx5/mlx5_ib/mlx5_ib.h>
3712515907SHans Petter Selasky 
3812515907SHans Petter Selasky enum {
3912515907SHans Petter Selasky 	MAX_PENDING_REG_MR = 8,
4012515907SHans Petter Selasky };
4112515907SHans Petter Selasky 
4212515907SHans Petter Selasky #define MLX5_UMR_ALIGN 2048
438e6e287fSHans Petter Selasky #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
448e6e287fSHans Petter Selasky static __be64 mlx5_ib_update_mtt_emergency_buffer[
458e6e287fSHans Petter Selasky 		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
468e6e287fSHans Petter Selasky 	__aligned(MLX5_UMR_ALIGN);
478e6e287fSHans Petter Selasky static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
488e6e287fSHans Petter Selasky #endif
4912515907SHans Petter Selasky 
508e6e287fSHans Petter Selasky static int clean_mr(struct mlx5_ib_mr *mr);
5112515907SHans Petter Selasky 
destroy_mkey(struct mlx5_ib_dev * dev,struct mlx5_ib_mr * mr)5212515907SHans Petter Selasky static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
5312515907SHans Petter Selasky {
548e6e287fSHans Petter Selasky 	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
558e6e287fSHans Petter Selasky 
568e6e287fSHans Petter Selasky #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
578e6e287fSHans Petter Selasky 	/* Wait until all page fault handlers using the mr complete. */
588e6e287fSHans Petter Selasky 	synchronize_srcu(&dev->mr_srcu);
598e6e287fSHans Petter Selasky #endif
6012515907SHans Petter Selasky 
6112515907SHans Petter Selasky 	return err;
6212515907SHans Petter Selasky }
6312515907SHans Petter Selasky 
order2idx(struct mlx5_ib_dev * dev,int order)6412515907SHans Petter Selasky static int order2idx(struct mlx5_ib_dev *dev, int order)
6512515907SHans Petter Selasky {
6612515907SHans Petter Selasky 	struct mlx5_mr_cache *cache = &dev->cache;
6712515907SHans Petter Selasky 
6812515907SHans Petter Selasky 	if (order < cache->ent[0].order)
6912515907SHans Petter Selasky 		return 0;
7012515907SHans Petter Selasky 	else
7112515907SHans Petter Selasky 		return order - cache->ent[0].order;
7212515907SHans Petter Selasky }
7312515907SHans Petter Selasky 
use_umr_mtt_update(struct mlx5_ib_mr * mr,u64 start,u64 length)748e6e287fSHans Petter Selasky static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
758e6e287fSHans Petter Selasky {
768e6e287fSHans Petter Selasky 	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
778e6e287fSHans Petter Selasky 		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
788e6e287fSHans Petter Selasky }
798e6e287fSHans Petter Selasky 
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/*
 * Publish a fully initialized MR to the ODP invalidation/page-fault
 * machinery by storing it in umem->odp_data->private.  The two write
 * barriers order MR initialization before the store, and the store
 * before the MR becomes reachable by fault handlers.
 */
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization have finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif
1078e6e287fSHans Petter Selasky 
/*
 * Completion callback for the asynchronous create_mkey commands issued
 * by add_keys().  On failure the MR is freed and cache filling is
 * delayed for ~1s (cleared by delay_time_func()).  On success the mkey
 * key is finalized, the MR is appended to its cache entry, and the
 * mkey is inserted into the device-wide radix tree for lookups.
 */
static void reg_mr_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5_ib_mr *mr =
		container_of(context, struct mlx5_ib_mr, cb_work);
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
	int err;

	/* The command is no longer pending against this cache entry. */
	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		/* Back off further cache filling until the timer fires. */
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	/* Combine the firmware mkey index with a per-device rotating
	 * key byte to form the full mkey. */
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	/* Make the mkey discoverable through the device mkey table. */
	spin_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_mkey_to_idx(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	spin_unlock_irqrestore(&table->lock, flags);
}
15212515907SHans Petter Selasky 
/*
 * Issue up to @num asynchronous mkey-creation commands for cache entry
 * @c; completions are handled in reg_mr_callback().  Stops early with
 * -EAGAIN when MAX_PENDING_REG_MR commands are already outstanding,
 * or with -ENOMEM on allocation failure.  Returns 0 when all @num
 * commands were issued.
 */
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	/* One input mailbox is reused for every command in the batch. */
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		/* Bound the number of in-flight firmware commands. */
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;	/* mark as cache/UMR-managed */
		mr->dev = dev;

		/* Create the mkey in the "free" state with UMR enabled;
		 * the page list is populated later via UMR at reg time. */
		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_MTT);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
		MLX5_SET(mkc, mkc, log_page_size, 12);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       &dev->async_ctx, in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, &mr->cb_work);
		if (err) {
			/* Command was never issued; undo the accounting. */
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
21312515907SHans Petter Selasky 
remove_keys(struct mlx5_ib_dev * dev,int c,int num)21412515907SHans Petter Selasky static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
21512515907SHans Petter Selasky {
21612515907SHans Petter Selasky 	struct mlx5_mr_cache *cache = &dev->cache;
21712515907SHans Petter Selasky 	struct mlx5_cache_ent *ent = &cache->ent[c];
21812515907SHans Petter Selasky 	struct mlx5_ib_mr *mr;
21912515907SHans Petter Selasky 	int err;
22012515907SHans Petter Selasky 	int i;
22112515907SHans Petter Selasky 
22212515907SHans Petter Selasky 	for (i = 0; i < num; i++) {
22312515907SHans Petter Selasky 		spin_lock_irq(&ent->lock);
22412515907SHans Petter Selasky 		if (list_empty(&ent->head)) {
22512515907SHans Petter Selasky 			spin_unlock_irq(&ent->lock);
22612515907SHans Petter Selasky 			return;
22712515907SHans Petter Selasky 		}
22812515907SHans Petter Selasky 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
22912515907SHans Petter Selasky 		list_del(&mr->list);
23012515907SHans Petter Selasky 		ent->cur--;
23112515907SHans Petter Selasky 		ent->size--;
23212515907SHans Petter Selasky 		spin_unlock_irq(&ent->lock);
23312515907SHans Petter Selasky 		err = destroy_mkey(dev, mr);
23412515907SHans Petter Selasky 		if (err)
23512515907SHans Petter Selasky 			mlx5_ib_warn(dev, "failed destroy mkey\n");
23612515907SHans Petter Selasky 		else
23712515907SHans Petter Selasky 			kfree(mr);
23812515907SHans Petter Selasky 	}
23912515907SHans Petter Selasky }
24012515907SHans Petter Selasky 
someone_adding(struct mlx5_mr_cache * cache)24112515907SHans Petter Selasky static int someone_adding(struct mlx5_mr_cache *cache)
24212515907SHans Petter Selasky {
24312515907SHans Petter Selasky 	int i;
24412515907SHans Petter Selasky 
24512515907SHans Petter Selasky 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
24612515907SHans Petter Selasky 		if (cache->ent[i].cur < cache->ent[i].limit)
24712515907SHans Petter Selasky 			return 1;
24812515907SHans Petter Selasky 	}
24912515907SHans Petter Selasky 
25012515907SHans Petter Selasky 	return 0;
25112515907SHans Petter Selasky }
25212515907SHans Petter Selasky 
/*
 * Core cache-maintenance logic shared by cache_work_func() and
 * delayed_cache_work_func().  Fills the entry up towards twice its
 * limit (one key per invocation, rescheduling itself), and lazily
 * shrinks it when it exceeds twice the limit.
 */
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				/* Too many pending commands; retry shortly. */
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				/* Command failure; back off for a second. */
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as garbage collection
		 * task. Such task is intended to be run when no other active
		 * processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in near future.
		 *
		 * In such case, we don't execute remove_keys() and postpone
		 * the garbage collection work to try to run in next cycle,
		 * in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}
30412515907SHans Petter Selasky 
delayed_cache_work_func(struct work_struct * work)30512515907SHans Petter Selasky static void delayed_cache_work_func(struct work_struct *work)
30612515907SHans Petter Selasky {
30712515907SHans Petter Selasky 	struct mlx5_cache_ent *ent;
30812515907SHans Petter Selasky 
30912515907SHans Petter Selasky 	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
31012515907SHans Petter Selasky 	__cache_work_func(ent);
31112515907SHans Petter Selasky }
31212515907SHans Petter Selasky 
cache_work_func(struct work_struct * work)31312515907SHans Petter Selasky static void cache_work_func(struct work_struct *work)
31412515907SHans Petter Selasky {
31512515907SHans Petter Selasky 	struct mlx5_cache_ent *ent;
31612515907SHans Petter Selasky 
31712515907SHans Petter Selasky 	ent = container_of(work, struct mlx5_cache_ent, work);
31812515907SHans Petter Selasky 	__cache_work_func(ent);
31912515907SHans Petter Selasky }
32012515907SHans Petter Selasky 
/*
 * Take a pre-created MR from the cache for a registration of 2^@order
 * pages.  Starts at the exact-fit entry and falls back to larger
 * orders; kicks the refill worker for any entry it takes from (when it
 * drops below its limit) or finds empty.  Returns NULL and counts a
 * miss when nothing suitable is cached.
 */
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			/* Refill if we dropped below the low-water mark. */
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		/* Entry was empty: ask the worker to replenish it. */
		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}
3618e6e287fSHans Petter Selasky 
free_cached_mr(struct mlx5_ib_dev * dev,struct mlx5_ib_mr * mr)36212515907SHans Petter Selasky static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
36312515907SHans Petter Selasky {
36412515907SHans Petter Selasky 	struct mlx5_mr_cache *cache = &dev->cache;
36512515907SHans Petter Selasky 	struct mlx5_cache_ent *ent;
36612515907SHans Petter Selasky 	int shrink = 0;
36712515907SHans Petter Selasky 	int c;
36812515907SHans Petter Selasky 
36912515907SHans Petter Selasky 	c = order2idx(dev, mr->order);
37012515907SHans Petter Selasky 	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
37112515907SHans Petter Selasky 		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
37212515907SHans Petter Selasky 		return;
37312515907SHans Petter Selasky 	}
37412515907SHans Petter Selasky 	ent = &cache->ent[c];
37512515907SHans Petter Selasky 	spin_lock_irq(&ent->lock);
37612515907SHans Petter Selasky 	list_add_tail(&mr->list, &ent->head);
37712515907SHans Petter Selasky 	ent->cur++;
37812515907SHans Petter Selasky 	if (ent->cur > 2 * ent->limit)
37912515907SHans Petter Selasky 		shrink = 1;
38012515907SHans Petter Selasky 	spin_unlock_irq(&ent->lock);
38112515907SHans Petter Selasky 
38212515907SHans Petter Selasky 	if (shrink)
3838e6e287fSHans Petter Selasky 		queue_work(cache->wq, &ent->work);
38412515907SHans Petter Selasky }
38512515907SHans Petter Selasky 
clean_keys(struct mlx5_ib_dev * dev,int c)38612515907SHans Petter Selasky static void clean_keys(struct mlx5_ib_dev *dev, int c)
38712515907SHans Petter Selasky {
38812515907SHans Petter Selasky 	struct mlx5_mr_cache *cache = &dev->cache;
38912515907SHans Petter Selasky 	struct mlx5_cache_ent *ent = &cache->ent[c];
39012515907SHans Petter Selasky 	struct mlx5_ib_mr *mr;
39112515907SHans Petter Selasky 	int err;
39212515907SHans Petter Selasky 
39312515907SHans Petter Selasky 	cancel_delayed_work(&ent->dwork);
39412515907SHans Petter Selasky 	while (1) {
39512515907SHans Petter Selasky 		spin_lock_irq(&ent->lock);
39612515907SHans Petter Selasky 		if (list_empty(&ent->head)) {
39712515907SHans Petter Selasky 			spin_unlock_irq(&ent->lock);
39812515907SHans Petter Selasky 			return;
39912515907SHans Petter Selasky 		}
40012515907SHans Petter Selasky 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
40112515907SHans Petter Selasky 		list_del(&mr->list);
40212515907SHans Petter Selasky 		ent->cur--;
40312515907SHans Petter Selasky 		ent->size--;
40412515907SHans Petter Selasky 		spin_unlock_irq(&ent->lock);
40512515907SHans Petter Selasky 		err = destroy_mkey(dev, mr);
40612515907SHans Petter Selasky 		if (err)
4078e6e287fSHans Petter Selasky 			mlx5_ib_warn(dev, "failed destroy mkey\n");
40812515907SHans Petter Selasky 		else
40912515907SHans Petter Selasky 			kfree(mr);
41012515907SHans Petter Selasky 	}
41112515907SHans Petter Selasky }
41212515907SHans Petter Selasky 
delay_time_func(unsigned long ctx)41312515907SHans Petter Selasky static void delay_time_func(unsigned long ctx)
41412515907SHans Petter Selasky {
41512515907SHans Petter Selasky 	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
41612515907SHans Petter Selasky 
41712515907SHans Petter Selasky 	dev->fill_delay = 0;
41812515907SHans Petter Selasky }
41912515907SHans Petter Selasky 
mlx5_mr_cache_init(struct mlx5_ib_dev * dev)42012515907SHans Petter Selasky int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
42112515907SHans Petter Selasky {
42212515907SHans Petter Selasky 	struct mlx5_mr_cache *cache = &dev->cache;
42312515907SHans Petter Selasky 	struct mlx5_cache_ent *ent;
42412515907SHans Petter Selasky 	int limit;
42512515907SHans Petter Selasky 	int i;
42612515907SHans Petter Selasky 
42712515907SHans Petter Selasky 	mutex_init(&dev->slow_path_mutex);
4288e6e287fSHans Petter Selasky 	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
42912515907SHans Petter Selasky 	if (!cache->wq) {
43012515907SHans Petter Selasky 		mlx5_ib_warn(dev, "failed to create work queue\n");
43112515907SHans Petter Selasky 		return -ENOMEM;
43212515907SHans Petter Selasky 	}
43312515907SHans Petter Selasky 
4347eefcb5eSHans Petter Selasky 	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
4358e6e287fSHans Petter Selasky 	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
43612515907SHans Petter Selasky 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
43712515907SHans Petter Selasky 		INIT_LIST_HEAD(&cache->ent[i].head);
43812515907SHans Petter Selasky 		spin_lock_init(&cache->ent[i].lock);
43912515907SHans Petter Selasky 
44012515907SHans Petter Selasky 		ent = &cache->ent[i];
44112515907SHans Petter Selasky 		INIT_LIST_HEAD(&ent->head);
44212515907SHans Petter Selasky 		spin_lock_init(&ent->lock);
44312515907SHans Petter Selasky 		ent->order = i + 2;
44412515907SHans Petter Selasky 		ent->dev = dev;
44512515907SHans Petter Selasky 
4468e6e287fSHans Petter Selasky 		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
44712515907SHans Petter Selasky 			limit = dev->mdev->profile->mr_cache[i].limit;
44812515907SHans Petter Selasky 		else
44912515907SHans Petter Selasky 			limit = 0;
45012515907SHans Petter Selasky 
45112515907SHans Petter Selasky 		INIT_WORK(&ent->work, cache_work_func);
45212515907SHans Petter Selasky 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
45312515907SHans Petter Selasky 		ent->limit = limit;
4548e6e287fSHans Petter Selasky 		queue_work(cache->wq, &ent->work);
45512515907SHans Petter Selasky 	}
45612515907SHans Petter Selasky 
45712515907SHans Petter Selasky 	return 0;
45812515907SHans Petter Selasky }
45912515907SHans Petter Selasky 
mlx5_mr_cache_cleanup(struct mlx5_ib_dev * dev)46012515907SHans Petter Selasky int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
46112515907SHans Petter Selasky {
46212515907SHans Petter Selasky 	int i;
46312515907SHans Petter Selasky 
46412515907SHans Petter Selasky 	dev->cache.stopped = 1;
46512515907SHans Petter Selasky 	flush_workqueue(dev->cache.wq);
4667eefcb5eSHans Petter Selasky 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
46712515907SHans Petter Selasky 
46812515907SHans Petter Selasky 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
46912515907SHans Petter Selasky 		clean_keys(dev, i);
47012515907SHans Petter Selasky 
47112515907SHans Petter Selasky 	destroy_workqueue(dev->cache.wq);
47212515907SHans Petter Selasky 	del_timer_sync(&dev->delay_timer);
4738e6e287fSHans Petter Selasky 
47412515907SHans Petter Selasky 	return 0;
47512515907SHans Petter Selasky }
47612515907SHans Petter Selasky 
/*
 * Create the verbs "DMA MR": an mkey in physical-address (PA) access
 * mode covering all of memory (start_addr 0, length64 set), with
 * access rights derived from @acc.  No umem is attached.
 *
 * Returns the new ib_mr, or an ERR_PTR on allocation or firmware
 * command failure.
 */
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	/* Translate IB access flags into mkey context access bits. */
	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	/* Whole address space: length64 with start address zero. */
	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
53012515907SHans Petter Selasky 
/*
 * Number of octowords (16-byte units, two 8-byte translation entries
 * each) needed to map the pages spanned by [addr, addr + len).
 */
static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 in_page = addr & (page_size - 1);
	int npages = ALIGN(len + in_page, page_size) >> ilog2(page_size);

	return (npages + 1) / 2;
}
54012515907SHans Petter Selasky 
use_umr(int order)5418e6e287fSHans Petter Selasky static int use_umr(int order)
54212515907SHans Petter Selasky {
5438e6e287fSHans Petter Selasky 	return order <= MLX5_MAX_UMR_SHIFT;
54412515907SHans Petter Selasky }
54512515907SHans Petter Selasky 
/*
 * Allocate a buffer for the page-address (PAS) array of @umem, fill it
 * with the MTT entries and DMA-map it for use in a UMR work request.
 *
 * On success *mr_pas holds the raw allocation (the caller kfree()s it),
 * *dma the mapped address of the 2k-aligned array within it, and *size
 * the mapped length in bytes.  Returns 0 or -ENOMEM.
 */
static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int npages, int page_shift, int *size,
			  __be64 **mr_pas, dma_addr_t *dma)
{
	struct device *ddev = dev->ib_dev.dma_device;
	__be64 *aligned_pas;

	/*
	 * UMR copies MTTs in chunks of MLX5_UMR_MTT_ALIGNMENT bytes, so
	 * round the buffer size up, and over-allocate by one alignment
	 * unit so the array itself can be placed on a 2k boundary.
	 */
	*size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	*mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!(*mr_pas))
		return -ENOMEM;

	aligned_pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, aligned_pas,
			     MLX5_IB_MTT_PRESENT);
	/* Zero the alignment padding so UMR does not copy garbage. */
	memset(aligned_pas + npages, 0, *size - npages * sizeof(u64));

	*dma = dma_map_single(ddev, aligned_pas, *size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, *dma)) {
		kfree(*mr_pas);
		return -ENOMEM;
	}

	return 0;
}
5768e6e287fSHans Petter Selasky 
/*
 * Fill the fields shared by all UMR work requests: the scatter entry
 * describing the DMA-mapped translation table and the basic WR/mkey
 * parameters.  @n may be zero, in which case no s/g entry is attached.
 */
static void prep_umr_wqe_common(struct ib_pd *pd, struct mlx5_umr_wr *umrwr,
				struct ib_sge *sg, u64 dma, int n, u32 key,
				int page_shift)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);

	/* The translation table is consumed in 64-byte granularity. */
	sg->lkey = dev->umrc.pd->local_dma_lkey;
	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);

	umrwr->wr.next = NULL;
	umrwr->wr.opcode = MLX5_IB_WR_UMR;
	umrwr->wr.sg_list = sg;
	umrwr->wr.num_sge = n ? 1 : 0;

	umrwr->mkey = key;
	umrwr->page_shift = page_shift;
	umrwr->npages = n;
}
6008e6e287fSHans Petter Selasky 
/*
 * Build a UMR work request that (re)registers a memory key: common
 * fields first, then the registration-specific virtual window, length,
 * access rights and protection domain.
 */
static void prep_umr_reg_wqe(struct ib_pd *pd, struct mlx5_umr_wr *umrwr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	prep_umr_wqe_common(pd, umrwr, sg, dma, n, key, page_shift);

	umrwr->wr.send_flags = 0;
	umrwr->pd = pd;
	umrwr->access_flags = access_flags;
	umrwr->length = len;
	umrwr->target.virt_addr = virt_addr;
}
6158e6e287fSHans Petter Selasky 
/*
 * Build a UMR work request that unregisters mkey @key.  @dev is unused
 * but kept so the signature stays parallel to prep_umr_reg_wqe().
 */
static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct mlx5_umr_wr *umrwr, u32 key)
{
	umrwr->mkey = key;
	umrwr->wr.opcode = MLX5_IB_WR_UMR;
	umrwr->wr.send_flags = MLX5_IB_SEND_UMR_UNREG |
	    MLX5_IB_SEND_UMR_FAIL_IF_FREE;
}
6238e6e287fSHans Petter Selasky 
/*
 * Pin the user memory backing [start, start + length) and analyze its
 * page layout.  On success the umem is returned and *npages (total
 * pages), *ncont (contiguous chunks), *page_shift and *order are
 * filled in; on failure an ERR_PTR is returned and nothing stays
 * pinned.
 */
static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
				   int access_flags, int *npages,
				   int *page_shift, int *ncont, int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *umem;

	umem = ib_umem_get(pd->uobject->context, start, length,
			   access_flags, 0);
	if (IS_ERR(umem)) {
		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		/* An empty region cannot back an MR. */
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(umem);
		return ERR_PTR(-EINVAL);
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return umem;
}
6488e6e287fSHans Petter Selasky 
mlx5_ib_umr_done(struct ib_cq * cq,struct ib_wc * wc)6498e6e287fSHans Petter Selasky static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
6508e6e287fSHans Petter Selasky {
6518e6e287fSHans Petter Selasky 	struct mlx5_ib_umr_context *context =
6528e6e287fSHans Petter Selasky 		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
6538e6e287fSHans Petter Selasky 
6548e6e287fSHans Petter Selasky 	context->status = wc->status;
6558e6e287fSHans Petter Selasky 	complete(&context->done);
6568e6e287fSHans Petter Selasky }
6578e6e287fSHans Petter Selasky 
mlx5_ib_init_umr_context(struct mlx5_ib_umr_context * context)6588e6e287fSHans Petter Selasky static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
6598e6e287fSHans Petter Selasky {
6608e6e287fSHans Petter Selasky 	context->cqe.done = mlx5_ib_umr_done;
6618e6e287fSHans Petter Selasky 	context->status = -1;
6628e6e287fSHans Petter Selasky 	init_completion(&context->done);
6638e6e287fSHans Petter Selasky }
6648e6e287fSHans Petter Selasky 
/*
 * Fast-path registration: take a pre-created mkey of order @order from
 * the MR cache and point it at @umem by posting a UMR work request on
 * the dedicated UMR QP.  The operation is synchronous: we sleep until
 * the UMR completion arrives.
 *
 * Returns the MR on success, ERR_PTR(-EAGAIN) when the cache has no
 * entry of this order (the caller then falls back to the slow path),
 * or another ERR_PTR on failure.
 */
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	const struct ib_send_wr *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size;
	__be64 *mr_pas;
	dma_addr_t dma;
	int err = 0;
	int i;

	/*
	 * Single pass: try the cache; if empty, kick off a refill via
	 * add_keys() so that a later retry is more likely to succeed.
	 * Only a hard add_keys() failure (other than -EAGAIN) is warned
	 * about; either way an empty cache yields -EAGAIN below.
	 */
	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	/* Build and DMA-map the page-address array for the UMR WQE. */
	err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
			     &dma);
	if (err)
		goto free_mr;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_reg_wqe(pd, &umrwr, &sg, dma, npages, mr->mmkey.key,
			 page_shift, virt_addr, len, access_flags);

	/* umrc->sem serializes access to the shared UMR QP. */
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed\n");
			err = -EFAULT;
		}
	}

	/*
	 * Record the key's new window; harmless when err == -EFAULT
	 * because free_mr below returns the mkey to the cache.
	 */
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	mr->live = 1;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	kfree(mr_pas);

free_mr:
	if (err) {
		/* Return the unused mkey to the cache. */
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}
7428e6e287fSHans Petter Selasky 
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/*
 * Update a window of @npages MTT entries of MR @mr, starting at
 * @start_page_index, by posting UMR_UPDATE_MTT work requests on the
 * UMR QP.  When @zap is set, the entries are written as not-present
 * (the pas buffer stays zeroed) instead of being filled from the umem.
 *
 * May run in an invalidation context, hence the GFP_ATOMIC allocation
 * and the mutex-protected emergency-buffer fallback.  Returns 0 on
 * success or a negative errno.
 */
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
		       int zap)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	int size;
	__be64 *pas;
	dma_addr_t dma;
	const struct ib_send_wr *bad;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;

	/* One page-sized staging buffer, reused per loop iteration. */
	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		/* Shared fallback buffer; serialized by its mutex. */
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	}
	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
		err = -ENOMEM;
		goto free_pas;
	}

	/* Walk the window in chunks of at most pages_iter entries,
	 * stopping early on the first error. */
	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		/* Hand the buffer back to the CPU before rewriting it. */
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		if (!zap) {
			__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
					       start_page_index, npages, pas,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages brought from the
			 * umem. */
			memset(pas + npages, 0, size - npages * sizeof(u64));
		}

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		mlx5_ib_init_umr_context(&umr_context);

		memset(&wr, 0, sizeof(wr));
		wr.wr.wr_cqe = &umr_context.cqe;

		sg.addr = dma;
		sg.length = ALIGN(npages * sizeof(u64),
				MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.wr.sg_list = &sg;
		wr.wr.num_sge = 1;
		wr.wr.opcode = MLX5_IB_WR_UMR;
		wr.npages = sg.length / sizeof(u64);
		wr.page_shift = PAGE_SHIFT;
		wr.mkey = mr->mmkey.key;
		wr.target.offset = start_page_index;

		/* Post synchronously and wait for the UMR completion. */
		down(&umrc->sem);
		err = ib_post_send(umrc->qp, &wr.wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
		} else {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS) {
				mlx5_ib_err(dev, "UMR completion failed, code %d\n",
					    umr_context.status);
				err = -EFAULT;
			}
		}
		up(&umrc->sem);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	if (!use_emergency_buf)
		free_page((unsigned long)pas);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

	return err;
}
#endif
8658e6e287fSHans Petter Selasky 
/*
 * Slow-path registration: build a CREATE_MKEY command carrying the
 * full page list inline and execute it in firmware.
 *
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	/* Whether the device supports on-demand paging (pg) capability. */
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* Command length: header plus the page list, padded to an even
	 * number of 8-byte entries (whole octowords). */
	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
		sizeof(*pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	/* Program the memory key context: access rights derived from
	 * the requested IB access flags, local read always allowed. */
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	/* qpn 0xffffff: mkey not bound to a particular QP. */
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mr->dev = dev;
	mr->live = 1;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	/* Only free mr if we allocated it here (caller owns ibmr). */
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}
94512515907SHans Petter Selasky 
/*
 * Fill the caller-visible fields of a freshly registered MR and account
 * its pages in the device-wide registered-pages counter.  (The "fileds"
 * typo in the name is kept for compatibility with existing callers.)
 */
static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	atomic_add(npages, &dev->mdev->priv.reg_pages);

	mr->npages = npages;
	mr->access_flags = access_flags;
	mr->ibmr.length = length;
	/* lkey and rkey are both backed by the same mkey. */
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
}
95612515907SHans Petter Selasky 
/*
 * Register a user memory region: pin the pages, then try the fast UMR
 * path (cached mkey); fall back to the slow firmware-command path when
 * the region is too large for UMR or the cache is empty.  ODP regions
 * that cannot use UMR are rejected.
 *
 * Returns the new ib_mr or an ERR_PTR; on error the pinned umem is
 * released.
 */
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    (long long)start, (long long)virt_addr, (long long)length, access_flags);
	umem = mr_umem_get(pd, start, length, access_flags, &npages,
			   &page_shift, &ncont, &order);

	if (IS_ERR(umem))
		return (void *)umem;

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		/* -EAGAIN means the MR cache is empty: fall through to
		 * the slow path below with mr == NULL. */
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
		/* ODP MRs must go through UMR; no slow path for them. */
		err = -EINVAL;
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
		goto error;
	}

	if (!mr) {
		/* Slow path: firmware CREATE_MKEY, serialized per device. */
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fileds(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}
101812515907SHans Petter Selasky 
/*
 * Invalidate mkey @mr via a synchronous UMR unregister work request.
 * Skipped (returns 0) when the device is in internal-error state,
 * since the hardware cannot process the request then anyway.
 * Returns 0 on success or a negative errno.
 */
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	const struct ib_send_wr *bad;
	int err;

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_unreg_wqe(dev, &umrwr, mr->mmkey.key);

	/* umrc->sem serializes access to the shared UMR QP; note that
	 * the error path releases it before the completion is awaited
	 * (no completion will arrive for a failed post). */
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	} else {
		wait_for_completion(&umr_context.done);
		up(&umrc->sem);
	}
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}
10568e6e287fSHans Petter Selasky 
/*
 * rereg_umr() - modify an existing mkey in place by posting a UMR WQE.
 *
 * "flags" is a bitmask of IB_MR_REREG_TRANS/PD/ACCESS selecting which
 * attributes of the MR are updated.  The WQE is posted on the device's
 * shared UMR QP and this function blocks until its completion arrives.
 *
 * Returns 0 on success or a negative errno.
 */
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
		     u64 length, int npages, int page_shift, int order,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct mlx5_ib_umr_context umr_context;
	const struct ib_send_wr *bad;
	struct mlx5_umr_wr umrwr = {};
	struct ib_sge sg;
	struct umr_common *umrc = &dev->umrc;
	dma_addr_t dma = 0;
	__be64 *mr_pas = NULL;
	int size;
	int err;

	mlx5_ib_init_umr_context(&umr_context);

	/* Completion is signalled through umr_context.cqe -> umr_context.done */
	umrwr.wr.wr_cqe = &umr_context.cqe;
	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	if (flags & IB_MR_REREG_TRANS) {
		/* Build and DMA-map the new page list for the translation. */
		err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
				     &mr_pas, &dma);
		if (err)
			return err;

		umrwr.target.virt_addr = virt_addr;
		umrwr.length = length;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
	}

	prep_umr_wqe_common(pd, &umrwr, &sg, dma, npages, mr->mmkey.key,
			    page_shift);

	if (flags & IB_MR_REREG_PD) {
		umrwr.pd = pd;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
	}

	/* post send request to UMR QP; umrc->sem throttles concurrent UMRs */
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);

	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}

	up(&umrc->sem);
	if (flags & IB_MR_REREG_TRANS) {
		/* Unmap and free the page list on success and failure alike. */
		dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
		kfree(mr_pas);
	}
	return err;
}
11248e6e287fSHans Petter Selasky 
/*
 * mlx5_ib_rereg_user_mr() - re-register a user memory region.
 *
 * "flags" selects which attributes change (IB_MR_REREG_TRANS/PD/ACCESS).
 * Unless the request is PD-only, the umem is always replaced and
 * IB_MR_REREG_TRANS is forced on.  If the existing mkey cannot be
 * updated in place (use_umr_mtt_update() refuses), it is torn down and
 * a new one created via reg_create(); otherwise a UMR WQE is posted
 * through rereg_umr().
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	/* Fall back to the MR's current PD/access/addr/len for unchanged attrs */
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    (long long)start, (long long)virt_addr, (long long)length, access_flags);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
				       &page_shift, &ncont, &order);
		if (IS_ERR(mr->umem)) {
			err = PTR_ERR(mr->umem);
			/* Clear so a later dereg won't release a bad pointer */
			mr->umem = NULL;
			return err;
		}
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->umred) {
			err = unreg_umr(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to unregister MR\n");
		} else {
			err = destroy_mkey(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
		}
		if (err)
			return err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags);

		if (IS_ERR(mr))
			return PTR_ERR(mr);

		/* The replacement mkey did not come from the UMR cache. */
		mr->umred = 0;
	} else {
		/*
		 * Send a UMR WQE
		 */
		err = rereg_umr(pd, mr, addr, len, npages, page_shift,
				order, access_flags, flags);
		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			return err;
		}
	}

	if (flags & IB_MR_REREG_PD) {
		ib_mr->pd = pd;
		mr->mmkey.pd = to_mpd(pd)->pdn;
	}

	if (flags & IB_MR_REREG_ACCESS)
		mr->access_flags = access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		/* Account the old pages out; helper accounts the new ones in.
		 * NOTE(review): "set_mr_fileds" is a pre-existing misspelling
		 * of "set_mr_fields"; presumably defined elsewhere in this
		 * file under that spelling, so it is not renamed here. */
		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
		set_mr_fileds(dev, mr, npages, len, access_flags);
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
	}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return 0;
}
121712515907SHans Petter Selasky 
12188e6e287fSHans Petter Selasky static int
mlx5_alloc_priv_descs(struct ib_device * device,struct mlx5_ib_mr * mr,int ndescs,int desc_size)12198e6e287fSHans Petter Selasky mlx5_alloc_priv_descs(struct ib_device *device,
12208e6e287fSHans Petter Selasky 		      struct mlx5_ib_mr *mr,
12218e6e287fSHans Petter Selasky 		      int ndescs,
12228e6e287fSHans Petter Selasky 		      int desc_size)
122312515907SHans Petter Selasky {
12248e6e287fSHans Petter Selasky 	int size = ndescs * desc_size;
12258e6e287fSHans Petter Selasky 	int add_size;
12268e6e287fSHans Petter Selasky 	int ret;
12278e6e287fSHans Petter Selasky 
12288e6e287fSHans Petter Selasky 	add_size = max_t(int, MLX5_UMR_ALIGN - 1, 0);
12298e6e287fSHans Petter Selasky 
12308e6e287fSHans Petter Selasky 	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
12318e6e287fSHans Petter Selasky 	if (!mr->descs_alloc)
12328e6e287fSHans Petter Selasky 		return -ENOMEM;
12338e6e287fSHans Petter Selasky 
12348e6e287fSHans Petter Selasky 	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
12358e6e287fSHans Petter Selasky 
12368e6e287fSHans Petter Selasky 	mr->desc_map = dma_map_single(device->dma_device, mr->descs,
12378e6e287fSHans Petter Selasky 				      size, DMA_TO_DEVICE);
12388e6e287fSHans Petter Selasky 	if (dma_mapping_error(device->dma_device, mr->desc_map)) {
12398e6e287fSHans Petter Selasky 		ret = -ENOMEM;
12408e6e287fSHans Petter Selasky 		goto err;
12418e6e287fSHans Petter Selasky 	}
12428e6e287fSHans Petter Selasky 
12438e6e287fSHans Petter Selasky 	return 0;
12448e6e287fSHans Petter Selasky err:
12458e6e287fSHans Petter Selasky 	kfree(mr->descs_alloc);
12468e6e287fSHans Petter Selasky 
12478e6e287fSHans Petter Selasky 	return ret;
12488e6e287fSHans Petter Selasky }
12498e6e287fSHans Petter Selasky 
12508e6e287fSHans Petter Selasky static void
mlx5_free_priv_descs(struct mlx5_ib_mr * mr)12518e6e287fSHans Petter Selasky mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
12528e6e287fSHans Petter Selasky {
12538e6e287fSHans Petter Selasky 	if (mr->descs) {
12548e6e287fSHans Petter Selasky 		struct ib_device *device = mr->ibmr.device;
12558e6e287fSHans Petter Selasky 		int size = mr->max_descs * mr->desc_size;
12568e6e287fSHans Petter Selasky 
12578e6e287fSHans Petter Selasky 		dma_unmap_single(device->dma_device, mr->desc_map,
12588e6e287fSHans Petter Selasky 				 size, DMA_TO_DEVICE);
12598e6e287fSHans Petter Selasky 		kfree(mr->descs_alloc);
12608e6e287fSHans Petter Selasky 		mr->descs = NULL;
12618e6e287fSHans Petter Selasky 	}
12628e6e287fSHans Petter Selasky }
12638e6e287fSHans Petter Selasky 
/*
 * clean_mr() - destroy the HW state of an MR and dispose of the
 * mlx5_ib_mr structure.
 *
 * Signature PSVs (if any) are destroyed first, then the private
 * descriptor buffer.  A non-UMR mkey is destroyed and the mr struct
 * freed; a UMR-cached mkey is invalidated with unreg_umr() and the mr
 * returned to the cache via free_cached_mr().
 *
 * The caller must not touch *mr after this function returns.
 */
static int clean_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int umred = mr->umred;
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!umred) {
		/*
		 * Save the key before destroy/kfree: mr is freed below and
		 * must not be dereferenced in the warning message.
		 */
		u32 key = mr->mmkey.key;

		err = destroy_mkey(dev, mr);
		kfree(mr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	return 0;
}
130612515907SHans Petter Selasky 
/*
 * mlx5_ib_dereg_mr() - verbs dereg_mr entry point.
 *
 * For ODP-backed MRs the umem is released *before* the mkey teardown
 * so that no invalidation can race with the dying mr; for regular MRs
 * the umem is released after clean_mr().  Page accounting in
 * dev->mdev->priv.reg_pages is reduced by the MR's page count.
 *
 * Always returns 0.
 */
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	/* Snapshot fields used after clean_mr() frees/recycles *mr. */
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
					 ib_umem_end(umem));
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	return 0;
}
13458e6e287fSHans Petter Selasky 
/*
 * mlx5_ib_alloc_mr() - allocate a non-user MR for fast registration.
 *
 * Supported types:
 *   IB_MR_TYPE_MEM_REG   - MTT-based MR with a page-list descriptor buffer
 *   IB_MR_TYPE_SG_GAPS   - KLM-based MR allowing gaps between SG entries
 *   IB_MR_TYPE_INTEGRITY - KLM-based signature MR with mem/wire PSVs
 *
 * max_num_sg is rounded up to a multiple of 4 for the descriptor count.
 * Returns the new ib_mr or an ERR_PTR on failure; error paths unwind
 * PSVs, descriptor buffers and the signature context in reverse order.
 */
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	/* Common mkey attributes: created "free", bound to this PD. */
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		/* One u64 page-list entry per descriptor. */
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(u64));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(u64);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_ACCESS_MODE_KLM;

		/* KLM entries carry va/bcount/key per SG element. */
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_INTEGRITY) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_ACCESS_MODE_KLM;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
	MLX5_SET(mkc, mkc, umr_en, 1);

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	/* mr->sig is only set on the IB_MR_TYPE_INTEGRITY path. */
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
145812515907SHans Petter Selasky 
/*
 * mlx5_ib_alloc_mw() - allocate a memory window.
 *
 * Reads the user's mlx5_ib_alloc_mw request from udata (rejecting any
 * comp_mask/reserved bits or unexpected trailing input), creates a
 * KLM-mode mkey sized for req.num_klms entries (rounded up to 4), and
 * copies a response_length-only reply back to userspace when requested.
 *
 * Returns the new ib_mw or an ERR_PTR on failure.
 */
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	/* Unknown request extensions are not supported. */
	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	/* Any bytes beyond our known request struct must be zero. */
	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_KLM);
	/* Type-2 windows require remote invalidation support. */
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->ibmw.rkey = mw->mmkey.key;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			/* Undo the mkey creation before the common unwind. */
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}
15318e6e287fSHans Petter Selasky 
mlx5_ib_dealloc_mw(struct ib_mw * mw)15328e6e287fSHans Petter Selasky int mlx5_ib_dealloc_mw(struct ib_mw *mw)
15338e6e287fSHans Petter Selasky {
15348e6e287fSHans Petter Selasky 	struct mlx5_ib_mw *mmw = to_mmw(mw);
15358e6e287fSHans Petter Selasky 	int err;
15368e6e287fSHans Petter Selasky 
15378e6e287fSHans Petter Selasky 	err =  mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
15388e6e287fSHans Petter Selasky 				      &mmw->mmkey);
15398e6e287fSHans Petter Selasky 	if (!err)
15408e6e287fSHans Petter Selasky 		kfree(mmw);
15418e6e287fSHans Petter Selasky 	return err;
15428e6e287fSHans Petter Selasky }
15438e6e287fSHans Petter Selasky 
/*
 * mlx5_ib_check_mr_status() - report signature (T10-DIF) status of an MR.
 *
 * Only IB_MR_CHECK_SIG_STATUS is a valid bit in check_mask.  When a
 * signature error is pending, it is copied (or synthesized, if the key
 * does not match) into mr_status->sig_err, the pending flag is cleared,
 * and IB_MR_CHECK_SIG_STATUS is set in mr_status->fail_status.
 *
 * Returns 0 on success, -EINVAL on a bad mask or a non-signature MR.
 */
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		return -EINVAL;
	}

	mr_status->fail_status = 0;
	if (!(check_mask & IB_MR_CHECK_SIG_STATUS))
		return 0;

	if (!mmr->sig) {
		pr_err("signature status check requested on a non-signature enabled MR\n");
		return -EINVAL;
	}

	mmr->sig->sig_status_checked = true;
	if (!mmr->sig->sig_err_exists)
		return 0;

	if (ibmr->lkey == mmr->sig->err_item.key) {
		memcpy(&mr_status->sig_err, &mmr->sig->err_item,
		       sizeof(mr_status->sig_err));
	} else {
		/* Error belongs to another key; report a generic guard error. */
		mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
		mr_status->sig_err.sig_err_offset = 0;
		mr_status->sig_err.key = mmr->sig->err_item.key;
	}

	mmr->sig->sig_err_exists = false;
	mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;

	return 0;
}
15848e6e287fSHans Petter Selasky 
15858e6e287fSHans Petter Selasky static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr * mr,struct scatterlist * sgl,unsigned short sg_nents,unsigned int * sg_offset_p)15868e6e287fSHans Petter Selasky mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
15878e6e287fSHans Petter Selasky 		   struct scatterlist *sgl,
15888e6e287fSHans Petter Selasky 		   unsigned short sg_nents,
15898e6e287fSHans Petter Selasky 		   unsigned int *sg_offset_p)
15908e6e287fSHans Petter Selasky {
15918e6e287fSHans Petter Selasky 	struct scatterlist *sg = sgl;
15928e6e287fSHans Petter Selasky 	struct mlx5_klm *klms = mr->descs;
15938e6e287fSHans Petter Selasky 	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
15948e6e287fSHans Petter Selasky 	u32 lkey = mr->ibmr.pd->local_dma_lkey;
15958e6e287fSHans Petter Selasky 	int i;
15968e6e287fSHans Petter Selasky 
15978e6e287fSHans Petter Selasky 	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
15988e6e287fSHans Petter Selasky 	mr->ibmr.length = 0;
15998e6e287fSHans Petter Selasky 	mr->ndescs = sg_nents;
16008e6e287fSHans Petter Selasky 
16018e6e287fSHans Petter Selasky 	for_each_sg(sgl, sg, sg_nents, i) {
16028e6e287fSHans Petter Selasky 		if (unlikely(i > mr->max_descs))
16038e6e287fSHans Petter Selasky 			break;
16048e6e287fSHans Petter Selasky 		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
16058e6e287fSHans Petter Selasky 		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
16068e6e287fSHans Petter Selasky 		klms[i].key = cpu_to_be32(lkey);
16078e6e287fSHans Petter Selasky 		mr->ibmr.length += sg_dma_len(sg);
16088e6e287fSHans Petter Selasky 
16098e6e287fSHans Petter Selasky 		sg_offset = 0;
16108e6e287fSHans Petter Selasky 	}
16118e6e287fSHans Petter Selasky 
16128e6e287fSHans Petter Selasky 	if (sg_offset_p)
16138e6e287fSHans Petter Selasky 		*sg_offset_p = sg_offset;
16148e6e287fSHans Petter Selasky 
16158e6e287fSHans Petter Selasky 	return i;
16168e6e287fSHans Petter Selasky }
16178e6e287fSHans Petter Selasky 
/*
 * Page-list callback for ib_sg_to_pages(): append one page address to
 * the MR's translation table, tagged with the read/write enable bits.
 * Fails with -ENOMEM once mr->max_descs entries have been stored.
 */
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *pas;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	pas = mr->descs;
	pas[mr->ndescs] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
	mr->ndescs++;

	return 0;
}
163112515907SHans Petter Selasky 
mlx5_ib_map_mr_sg(struct ib_mr * ibmr,struct scatterlist * sg,int sg_nents,unsigned int * sg_offset)16328e6e287fSHans Petter Selasky int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
16338e6e287fSHans Petter Selasky 		      unsigned int *sg_offset)
163412515907SHans Petter Selasky {
16358e6e287fSHans Petter Selasky 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
16368e6e287fSHans Petter Selasky 	int n;
163712515907SHans Petter Selasky 
16388e6e287fSHans Petter Selasky 	mr->ndescs = 0;
16398e6e287fSHans Petter Selasky 
16408e6e287fSHans Petter Selasky 	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
16418e6e287fSHans Petter Selasky 				   mr->desc_size * mr->max_descs,
16428e6e287fSHans Petter Selasky 				   DMA_TO_DEVICE);
16438e6e287fSHans Petter Selasky 
16448e6e287fSHans Petter Selasky 	if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
16458e6e287fSHans Petter Selasky 		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
16468e6e287fSHans Petter Selasky 	else
16478e6e287fSHans Petter Selasky 		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
16488e6e287fSHans Petter Selasky 				mlx5_set_page);
16498e6e287fSHans Petter Selasky 
16508e6e287fSHans Petter Selasky 	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
16518e6e287fSHans Petter Selasky 				      mr->desc_size * mr->max_descs,
16528e6e287fSHans Petter Selasky 				      DMA_TO_DEVICE);
16538e6e287fSHans Petter Selasky 
16548e6e287fSHans Petter Selasky 	return n;
165512515907SHans Petter Selasky }
1656