/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <rdma/ib_verbs.h>
#include <dev/mlx5/cq.h>
#include "mlx5_core.h"

#include <sys/epoch.h>

static void
mlx5_cq_table_write_lock(struct mlx5_cq_table *table)
{

	atomic_inc(&table->writercount);
	/* make sure all see the updated writercount */
	NET_EPOCH_WAIT();
	spin_lock(&table->writerlock);
}

static void
mlx5_cq_table_write_unlock(struct mlx5_cq_table *table)
{

	spin_unlock(&table->writerlock);
	atomic_dec(&table->writercount);
	/* drain all pending CQ callers */
	NET_EPOCH_WAIT();
}
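
/*
 * CQ table synchronization scheme, reader side:
 *
 * The two dispatch routines below run in the interrupt hot path and
 * avoid taking "writerlock" unless a table update is in flight.  A
 * reader enters the network epoch and then samples "writercount".  A
 * writer first increments "writercount" and calls NET_EPOCH_WAIT(),
 * which guarantees that every reader that could have missed the
 * increment has left its epoch section; from that point until the
 * writer decrements the count again, all lookups serialize against
 * the writer through "writerlock".  On unlock the writer waits out
 * the epoch once more, so no reader can still be using a CQ pointer
 * that the update removed.
 */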
void mlx5_cq_completion(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;
	struct epoch_tracker et;
	u32 cqn;
	bool do_lock;

	cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

	NET_EPOCH_ENTER(et);

	do_lock = atomic_read(&table->writercount) != 0;
	if (unlikely(do_lock))
		spin_lock(&table->writerlock);

	if (likely(cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		cq = table->linear_array[cqn].cq;
	else
		cq = radix_tree_lookup(&table->tree, cqn);

	if (unlikely(do_lock))
		spin_unlock(&table->writerlock);

	if (likely(cq != NULL)) {
		++cq->arm_sn;
		cq->comp(cq, eqe);
	} else {
		mlx5_core_warn(dev,
		    "Completion event for bogus CQ 0x%x\n", cqn);
	}

	NET_EPOCH_EXIT(et);
}

void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;
	struct epoch_tracker et;
	bool do_lock;

	NET_EPOCH_ENTER(et);

	do_lock = atomic_read(&table->writercount) != 0;
	if (unlikely(do_lock))
		spin_lock(&table->writerlock);

	if (likely(cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		cq = table->linear_array[cqn].cq;
	else
		cq = radix_tree_lookup(&table->tree, cqn);

	if (unlikely(do_lock))
		spin_unlock(&table->writerlock);

	if (likely(cq != NULL)) {
		cq->event(cq, event_type);
	} else {
		mlx5_core_warn(dev,
		    "Asynchronous event for bogus CQ 0x%x\n", cqn);
	}

	NET_EPOCH_EXIT(et);
}

int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen, u32 *out, int outlen)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
	int err;

	memset(out, 0, outlen);
	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
	if (err)
		return err;

	cq->cqn = MLX5_GET(create_cq_out, out, cqn);
	cq->cons_index = 0;
	cq->arm_sn = 0;

	mlx5_cq_table_write_lock(table);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	if (likely(err == 0 && cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		table->linear_array[cq->cqn].cq = cq;
	mlx5_cq_table_write_unlock(table);

	if (err)
		goto err_cmd;

	cq->pid = curthread->td_proc->p_pid;
	cq->uar = dev->priv.uar;

	return 0;

err_cmd:
	MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);

int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
	struct mlx5_core_cq *tmp;

	mlx5_cq_table_write_lock(table);
	if (likely(cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		table->linear_array[cq->cqn].cq = NULL;
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	mlx5_cq_table_write_unlock(table);

	if (unlikely(tmp == NULL)) {
		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
		return -EINVAL;
	} else if (unlikely(tmp != cq)) {
		mlx5_core_warn(dev, "corrupted cqn 0x%x\n", cq->cqn);
		return -EINVAL;
	}

	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);

int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};

	MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
	MLX5_SET(query_cq_in, in, cqn, cq->cqn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);

int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};

	MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);

int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq,
				   u16 cq_period,
				   u16 cq_max_count)
{
	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
	void *cqc;

	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, cq_period, cq_period);
	MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);

	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
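
/*
 * Example use of mlx5_core_modify_cq_moderation() above.  This is a
 * hypothetical caller, not part of this file; the variables and values
 * are illustrative only.  It coalesces completion interrupts to at
 * most one per 10 microseconds or one per 32 completions, whichever
 * limit is reached first (cq_period is expressed in microseconds):
 *
 *	error = mlx5_core_modify_cq_moderation(dev, &cq, 10, 32);
 *	if (error != 0)
 *		mlx5_core_warn(dev, "CQ moderation update failed: %d\n",
 *		    error);
 */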
int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
					struct mlx5_core_cq *cq,
					u16 cq_period,
					u16 cq_max_count,
					u8 cq_mode)
{
	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
	void *cqc;

	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, cq_period, cq_period);
	MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
	MLX5_SET(cqc, cqc, cq_period_mode, cq_mode);
	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE);

	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}

int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->writerlock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);

	return 0;
}

void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
}
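
/*
 * Lifecycle sketch for the API above (a hypothetical consumer; building
 * the create_cq_in mailbox and providing the cq->comp/cq->event
 * callbacks are the caller's responsibility and are omitted here):
 *
 *	mlx5_init_cq_table(dev);
 *	...fill "in" (inlen bytes) with a create_cq_in command...
 *	err = mlx5_core_create_cq(dev, &cq, in, inlen, out, sizeof(out));
 *	...hardware events are now dispatched to cq.comp and cq.event
 *	   through mlx5_cq_completion() and mlx5_cq_event()...
 *	err = mlx5_core_destroy_cq(dev, &cq);
 *	mlx5_cleanup_cq_table(dev);
 */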