/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hardirq.h>
#include <linux/module.h>

#include <dev/mlx4/cmd.h>
#include <dev/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)

/*
 * Dispatch a completion event to the CQ's completion handler.  The
 * reference taken under the read lock keeps the CQ alive until the
 * handler returns, even if mlx4_cq_free() is running concurrently.
 */
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	read_lock(&cq_table->cq_table_lock);

	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	read_unlock(&cq_table->cq_table_lock);

	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

/* Dispatch an asynchronous (error) event to the CQ's event handler. */
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	read_lock(&cq_table->cq_table_lock);

	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	read_unlock(&cq_table->cq_table_lock);

	if (!cq) {
		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

/* Update the completion event moderation (count/period) of an existing CQ. */
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period    = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);

/* Point an existing CQ at a new, resized buffer described by @mtt. */
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size   = mtt->page_shift - 12;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);

/* Set the overrun-ignore (OI) flag so CQ overruns are not treated as errors. */
int mlx4_cq_ignore_overrun(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags |= cpu_to_be32(MLX4_CQ_FLAG_OI);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 3);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_ignore_overrun);

/* Reserve a CQ number and make sure ICM memory is mapped for its context. */
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
	return err;
}

static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	u64 out_param;
	int err;

	/* On a multi-function device the CQ number is obtained from the master. */
	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;
		else {
			*cqn = get_param_l(&out_param);
			return 0;
		}
	}
	return __mlx4_cq_alloc_icm(dev, cqn);
}

void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
	mlx4_table_put(dev, &cq_table->table, cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}

static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, cqn);
		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else
		__mlx4_cq_free_icm(dev, cqn);
}

int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
		  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
		  struct mlx4_cq *cq, unsigned vector, int collapsed,
		  int timestamp_en)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
		return -EINVAL;

	cq->vector = vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
	if (err)
		return err;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags = cpu_to_be32(!!collapsed << 18);
	if (timestamp_en)
		cq_context->flags |= cpu_to_be32(1 << 19);

	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn	    = priv->eq_table.eq[vector].eqn;
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	/* Hand the initialized CQ context to the firmware. */
	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	cq->eqn = priv->eq_table.eq[cq->vector].eqn;
	cq->irq = priv->eq_table.eq[cq->vector].irq;

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);

void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	synchronize_irq(priv->eq_table.eq[cq->vector].irq);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	/* Drop the initial reference and wait for in-flight handlers to finish. */
	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);

int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	rwlock_init(&cq_table->cq_table_lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}