1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright (c) Amazon.com, Inc. or its affiliates.
3  * All rights reserved.
4  */
5 
6 #include "ena_com.h"
7 
8 /*****************************************************************************/
9 /*****************************************************************************/
10 
11 /* Timeout in micro-sec */
12 #define ADMIN_CMD_TIMEOUT_US (3000000)
13 
14 #define ENA_ASYNC_QUEUE_DEPTH 16
15 #define ENA_ADMIN_QUEUE_DEPTH 32
16 
17 #define ENA_CTRL_MAJOR		0
18 #define ENA_CTRL_MINOR		0
19 #define ENA_CTRL_SUB_MINOR	1
20 
21 #define MIN_ENA_CTRL_VER \
22 	(((ENA_CTRL_MAJOR) << \
23 	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
24 	((ENA_CTRL_MINOR) << \
25 	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
26 	(ENA_CTRL_SUB_MINOR))
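
/* For illustration: with the constants above, MIN_ENA_CTRL_VER packs to
 * controller version 0.0.1 in the layout of the controller version register,
 * presumably the lowest controller version accepted when
 * ena_com_validate_version() compares it against the value read from
 * ENA_REGS_CONTROLLER_VERSION_OFF.
 */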
27 
28 #define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
29 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
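
/* For illustration, a worked example of the split above: for the 64-bit DMA
 * address 0x0000001234567890,
 *   ENA_DMA_ADDR_TO_UINT32_LOW(addr)  == 0x34567890
 *   ENA_DMA_ADDR_TO_UINT32_HIGH(addr) == 0x00000012
 * These are the halves written to *_LO/*_HI register pairs such as
 * ENA_REGS_AENQ_BASE_LO_OFF/ENA_REGS_AENQ_BASE_HI_OFF below.
 */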
30 
31 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
32 
33 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4
34 
35 #define ENA_REGS_ADMIN_INTR_MASK 1
36 
37 #define ENA_MAX_BACKOFF_DELAY_EXP 16U
38 
39 #define ENA_MIN_ADMIN_POLL_US 100
40 
41 #define ENA_MAX_ADMIN_POLL_US 5000
42 
43 /* PHC definitions */
44 #define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 10
45 #define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000
46 #define ENA_PHC_MAX_ERROR_BOUND 0xFFFFFFFF
47 #define ENA_PHC_REQ_ID_OFFSET 0xDEAD
48 #define ENA_PHC_ERROR_FLAGS (ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP | \
49 			     ENA_ADMIN_PHC_ERROR_FLAG_ERROR_BOUND)
50 
51 /*****************************************************************************/
52 /*****************************************************************************/
53 /*****************************************************************************/
54 
55 enum ena_cmd_status {
56 	ENA_CMD_SUBMITTED,
57 	ENA_CMD_COMPLETED,
58 	/* Abort - canceled by the driver */
59 	ENA_CMD_ABORTED,
60 };
61 
62 struct ena_comp_ctx {
63 	ena_wait_event_t wait_event;
64 	struct ena_admin_acq_entry *user_cqe;
65 	u32 comp_size;
66 	enum ena_cmd_status status;
67 	/* status from the device */
68 	u8 comp_status;
69 	u8 cmd_opcode;
70 	bool occupied;
71 };
72 
73 struct ena_com_stats_ctx {
74 	struct ena_admin_aq_get_stats_cmd get_cmd;
75 	struct ena_admin_acq_get_stats_resp get_resp;
76 };
77 
78 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
79 				       struct ena_common_mem_addr *ena_addr,
80 				       dma_addr_t addr)
81 {
82 	if (unlikely((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr)) {
83 		ena_trc_err(ena_dev, "DMA address has more bits than the device supports\n");
84 		return ENA_COM_INVAL;
85 	}
86 
87 	ena_addr->mem_addr_low = lower_32_bits(addr);
88 	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
89 
90 	return 0;
91 }
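
/* Illustrative usage sketch of the helper above (mirroring how the command
 * builders later in this file call it): the high half is truncated to 16 bits,
 * so device-visible addresses are effectively limited to 48 bits, and anything
 * wider than ena_dev->dma_addr_bits is rejected up front.
 *
 *	rc = ena_com_mem_addr_set(ena_dev, &cmd.control_buffer.address,
 *				  control_buf_dma_addr);
 *	if (unlikely(rc))
 *		return rc;
 */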
92 
93 static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
94 {
95 	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
96 	struct ena_com_admin_sq *sq = &admin_queue->sq;
97 	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
98 
99 	ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,
100 			       sq->mem_handle);
101 
102 	if (unlikely(!sq->entries)) {
103 		ena_trc_err(ena_dev, "Memory allocation failed\n");
104 		return ENA_COM_NO_MEM;
105 	}
106 
107 	sq->head = 0;
108 	sq->tail = 0;
109 	sq->phase = 1;
110 
111 	sq->db_addr = NULL;
112 
113 	return 0;
114 }
115 
116 static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
117 {
118 	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
119 	struct ena_com_admin_cq *cq = &admin_queue->cq;
120 	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
121 
122 	ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr,
123 			       cq->mem_handle);
124 
125 	if (unlikely(!cq->entries))  {
126 		ena_trc_err(ena_dev, "Memory allocation failed\n");
127 		return ENA_COM_NO_MEM;
128 	}
129 
130 	cq->head = 0;
131 	cq->phase = 1;
132 
133 	return 0;
134 }
135 
136 static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
137 				   struct ena_aenq_handlers *aenq_handlers)
138 {
139 	struct ena_com_aenq *aenq = &ena_dev->aenq;
140 	u32 addr_low, addr_high, aenq_caps;
141 	u16 size;
142 
143 	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
144 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
145 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size,
146 			aenq->entries,
147 			aenq->dma_addr,
148 			aenq->mem_handle);
149 
150 	if (unlikely(!aenq->entries)) {
151 		ena_trc_err(ena_dev, "Memory allocation failed\n");
152 		return ENA_COM_NO_MEM;
153 	}
154 
155 	aenq->head = aenq->q_depth;
156 	aenq->phase = 1;
157 
158 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
159 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
160 
161 	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
162 	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
163 
164 	aenq_caps = 0;
165 	aenq_caps |= ENA_FIELD_PREP(ena_dev->aenq.q_depth,
166 				    ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK,
167 				    ENA_ZERO_SHIFT);
168 
169 	aenq_caps |= ENA_FIELD_PREP(sizeof(struct ena_admin_aenq_entry),
170 				    ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK,
171 				    ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT);
172 	ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
173 
174 	if (unlikely(!aenq_handlers)) {
175 		ena_trc_err(ena_dev, "AENQ handlers pointer is NULL\n");
176 		return ENA_COM_INVAL;
177 	}
178 
179 	aenq->aenq_handlers = aenq_handlers;
180 
181 	return 0;
182 }
183 
184 static void comp_ctxt_release(struct ena_com_admin_queue *queue,
185 				     struct ena_comp_ctx *comp_ctx)
186 {
187 	comp_ctx->user_cqe = NULL;
188 	comp_ctx->occupied = false;
189 	ATOMIC32_DEC(&queue->outstanding_cmds);
190 }
191 
192 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
193 					  u16 command_id, bool capture)
194 {
195 	if (unlikely(command_id >= admin_queue->q_depth)) {
196 		ena_trc_err(admin_queue->ena_dev,
197 			    "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
198 			    command_id, admin_queue->q_depth);
199 		return NULL;
200 	}
201 
202 	if (unlikely(!admin_queue->comp_ctx)) {
203 		ena_trc_err(admin_queue->ena_dev,
204 			    "Completion context is NULL\n");
205 		return NULL;
206 	}
207 
208 	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
209 		ena_trc_err(admin_queue->ena_dev,
210 			    "Completion context is occupied\n");
211 		return NULL;
212 	}
213 
214 	if (capture) {
215 		ATOMIC32_INC(&admin_queue->outstanding_cmds);
216 		admin_queue->comp_ctx[command_id].occupied = true;
217 	}
218 
219 	return &admin_queue->comp_ctx[command_id];
220 }
221 
222 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
223 						       struct ena_admin_aq_entry *cmd,
224 						       size_t cmd_size_in_bytes,
225 						       struct ena_admin_acq_entry *comp,
226 						       size_t comp_size_in_bytes)
227 {
228 	struct ena_comp_ctx *comp_ctx;
229 	u16 tail_masked, cmd_id;
230 	u16 queue_size_mask;
231 	u16 cnt;
232 
233 	queue_size_mask = admin_queue->q_depth - 1;
234 
235 	tail_masked = admin_queue->sq.tail & queue_size_mask;
236 
237 	/* In case of queue FULL */
238 	cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
239 	if (unlikely(cnt >= admin_queue->q_depth)) {
240 		ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
241 		admin_queue->stats.out_of_space++;
242 		return ERR_PTR(ENA_COM_NO_SPACE);
243 	}
244 
245 	cmd_id = admin_queue->curr_cmd_id;
246 
247 	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
248 		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
249 
250 	cmd->aq_common_descriptor.command_id |= cmd_id &
251 		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
252 
253 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
254 	if (unlikely(!comp_ctx))
255 		return ERR_PTR(ENA_COM_INVAL);
256 
257 	comp_ctx->status = ENA_CMD_SUBMITTED;
258 	comp_ctx->comp_size = (u32)comp_size_in_bytes;
259 	comp_ctx->user_cqe = comp;
260 	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
261 
262 	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
263 
264 	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
265 
266 	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
267 		queue_size_mask;
268 
269 	admin_queue->sq.tail++;
270 	admin_queue->stats.submitted_cmd++;
271 
272 	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
273 		admin_queue->sq.phase = !admin_queue->sq.phase;
274 
275 	ENA_DB_SYNC(&admin_queue->sq.mem_handle);
276 	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
277 			admin_queue->sq.db_addr);
278 
279 	return comp_ctx;
280 }
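
/* A note on the mechanics above (illustration only): a command is written at
 * sq.tail, stamped with the current phase bit and a command id, and announced
 * by writing the new tail to the doorbell register. With the admin queue depth
 * of 32 defined above, the tail wrapping from 31 to 32 makes
 * (tail & queue_size_mask) == 0, which toggles sq.phase so the device can tell
 * fresh entries from stale ones on the next pass over the ring.
 */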
281 
282 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
283 {
284 	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
285 	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
286 	struct ena_comp_ctx *comp_ctx;
287 	u16 i;
288 
289 	admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size);
290 	if (unlikely(!admin_queue->comp_ctx)) {
291 		ena_trc_err(ena_dev, "Memory allocation failed\n");
292 		return ENA_COM_NO_MEM;
293 	}
294 
295 	for (i = 0; i < admin_queue->q_depth; i++) {
296 		comp_ctx = get_comp_ctxt(admin_queue, i, false);
297 		if (comp_ctx)
298 			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
299 	}
300 
301 	return 0;
302 }
303 
304 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
305 						     struct ena_admin_aq_entry *cmd,
306 						     size_t cmd_size_in_bytes,
307 						     struct ena_admin_acq_entry *comp,
308 						     size_t comp_size_in_bytes)
309 {
310 	unsigned long flags = 0;
311 	struct ena_comp_ctx *comp_ctx;
312 
313 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
314 	if (unlikely(!admin_queue->running_state)) {
315 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
316 		return ERR_PTR(ENA_COM_NO_DEVICE);
317 	}
318 	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
319 					      cmd_size_in_bytes,
320 					      comp,
321 					      comp_size_in_bytes);
322 	if (IS_ERR(comp_ctx))
323 		admin_queue->running_state = false;
324 	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
325 
326 	return comp_ctx;
327 }
328 
329 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
330 			      struct ena_com_create_io_ctx *ctx,
331 			      struct ena_com_io_sq *io_sq)
332 {
333 	size_t size;
334 
335 	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
336 
337 	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
338 	io_sq->desc_entry_size =
339 		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
340 		sizeof(struct ena_eth_io_tx_desc) :
341 		sizeof(struct ena_eth_io_rx_desc);
342 
343 	size = io_sq->desc_entry_size * io_sq->q_depth;
344 	io_sq->bus = ena_dev->bus;
345 
346 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
347 		ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
348 					    size,
349 					    io_sq->desc_addr.virt_addr,
350 					    io_sq->desc_addr.phys_addr,
351 					    io_sq->desc_addr.mem_handle,
352 					    ctx->numa_node);
353 		if (!io_sq->desc_addr.virt_addr) {
354 			ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
355 					       size,
356 					       io_sq->desc_addr.virt_addr,
357 					       io_sq->desc_addr.phys_addr,
358 					       io_sq->desc_addr.mem_handle);
359 		}
360 
361 		if (unlikely(!io_sq->desc_addr.virt_addr)) {
362 			ena_trc_err(ena_dev, "Memory allocation failed\n");
363 			return ENA_COM_NO_MEM;
364 		}
365 	}
366 
367 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
368 		/* Allocate bounce buffers */
369 		io_sq->bounce_buf_ctrl.buffer_size =
370 			ena_dev->llq_info.desc_list_entry_size;
371 		io_sq->bounce_buf_ctrl.buffers_num =
372 			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
373 		io_sq->bounce_buf_ctrl.next_to_use = 0;
374 
375 		size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
376 			io_sq->bounce_buf_ctrl.buffers_num;
377 
378 		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
379 				   size,
380 				   io_sq->bounce_buf_ctrl.base_buffer,
381 				   ctx->numa_node);
382 		if (!io_sq->bounce_buf_ctrl.base_buffer)
383 			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
384 
385 		if (unlikely(!io_sq->bounce_buf_ctrl.base_buffer)) {
386 			ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
387 			return ENA_COM_NO_MEM;
388 		}
389 
390 		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
391 		       sizeof(io_sq->llq_info));
392 
393 		/* Initialize the first bounce buffer */
394 		io_sq->llq_buf_ctrl.curr_bounce_buf =
395 			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
396 		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
397 		       0x0, io_sq->llq_info.desc_list_entry_size);
398 		io_sq->llq_buf_ctrl.descs_left_in_line =
399 			io_sq->llq_info.descs_num_before_header;
400 		io_sq->disable_meta_caching =
401 			io_sq->llq_info.disable_meta_caching;
402 
403 		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
404 			io_sq->entries_in_tx_burst_left =
405 				io_sq->llq_info.max_entries_in_tx_burst;
406 	}
407 
408 	io_sq->tail = 0;
409 	io_sq->next_to_comp = 0;
410 	io_sq->phase = 1;
411 
412 	return 0;
413 }
414 
415 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
416 			      struct ena_com_create_io_ctx *ctx,
417 			      struct ena_com_io_cq *io_cq)
418 {
419 	size_t size;
420 
421 	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
422 
423 	/* Use the basic completion descriptor for Rx */
424 	io_cq->cdesc_entry_size_in_bytes =
425 		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
426 		sizeof(struct ena_eth_io_tx_cdesc) :
427 		sizeof(struct ena_eth_io_rx_cdesc_base);
428 
429 	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
430 	io_cq->bus = ena_dev->bus;
431 
432 	ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
433 					    size,
434 					    io_cq->cdesc_addr.virt_addr,
435 					    io_cq->cdesc_addr.phys_addr,
436 					    io_cq->cdesc_addr.mem_handle,
437 					    ctx->numa_node,
438 					    ENA_CDESC_RING_SIZE_ALIGNMENT);
439 	if (!io_cq->cdesc_addr.virt_addr) {
440 		ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
441 					       size,
442 					       io_cq->cdesc_addr.virt_addr,
443 					       io_cq->cdesc_addr.phys_addr,
444 					       io_cq->cdesc_addr.mem_handle,
445 					       ENA_CDESC_RING_SIZE_ALIGNMENT);
446 	}
447 
448 	if (unlikely(!io_cq->cdesc_addr.virt_addr)) {
449 		ena_trc_err(ena_dev, "Memory allocation failed\n");
450 		return ENA_COM_NO_MEM;
451 	}
452 
453 	io_cq->phase = 1;
454 	io_cq->head = 0;
455 
456 	return 0;
457 }
458 
459 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
460 						   struct ena_admin_acq_entry *cqe)
461 {
462 	struct ena_comp_ctx *comp_ctx;
463 	u16 cmd_id;
464 
465 	cmd_id = cqe->acq_common_descriptor.command &
466 		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
467 
468 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
469 	if (unlikely(!comp_ctx)) {
470 		ena_trc_err(admin_queue->ena_dev,
471 			    "comp_ctx is NULL. Changing the admin queue running state\n");
472 		admin_queue->running_state = false;
473 		return;
474 	}
475 
476 	if (!comp_ctx->occupied)
477 		return;
478 
479 	comp_ctx->status = ENA_CMD_COMPLETED;
480 	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
481 
482 	if (comp_ctx->user_cqe)
483 		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
484 
485 	if (!admin_queue->polling)
486 		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
487 }
488 
489 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
490 {
491 	struct ena_admin_acq_entry *cqe = NULL;
492 	u16 comp_num = 0;
493 	u16 head_masked;
494 	u8 phase;
495 
496 	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
497 	phase = admin_queue->cq.phase;
498 
499 	cqe = &admin_queue->cq.entries[head_masked];
500 
501 	/* Go over all the completions */
502 	while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
503 			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
504 		/* Do not read the rest of the completion entry before the
505 		 * phase bit has been validated
506 		 */
507 		dma_rmb();
508 		ena_com_handle_single_admin_completion(admin_queue, cqe);
509 
510 		head_masked++;
511 		comp_num++;
512 		if (unlikely(head_masked == admin_queue->q_depth)) {
513 			head_masked = 0;
514 			phase = !phase;
515 		}
516 
517 		cqe = &admin_queue->cq.entries[head_masked];
518 	}
519 
520 	admin_queue->cq.head += comp_num;
521 	admin_queue->cq.phase = phase;
522 	admin_queue->sq.head += comp_num;
523 	admin_queue->stats.completed_cmd += comp_num;
524 }
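
/* For illustration: completion entries are owned by the device until their
 * phase flag matches cq.phase. Once head_masked wraps back to 0 the expected
 * phase is flipped, so an entry left over from the previous lap no longer
 * matches and the loop above stops at the true end of the new completions.
 */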
525 
526 static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
527 					u8 comp_status)
528 {
529 	if (unlikely(comp_status != 0))
530 		ena_trc_err(admin_queue->ena_dev,
531 			    "Admin command failed[%u]\n", comp_status);
532 
533 	switch (comp_status) {
534 	case ENA_ADMIN_SUCCESS:
535 		return ENA_COM_OK;
536 	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
537 		return ENA_COM_NO_MEM;
538 	case ENA_ADMIN_UNSUPPORTED_OPCODE:
539 		return ENA_COM_UNSUPPORTED;
540 	case ENA_ADMIN_BAD_OPCODE:
541 	case ENA_ADMIN_MALFORMED_REQUEST:
542 	case ENA_ADMIN_ILLEGAL_PARAMETER:
543 	case ENA_ADMIN_UNKNOWN_ERROR:
544 		return ENA_COM_INVAL;
545 	case ENA_ADMIN_RESOURCE_BUSY:
546 		return ENA_COM_TRY_AGAIN;
547 	}
548 
549 	return ENA_COM_INVAL;
550 }
551 
552 static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
553 {
554 	exp = ENA_MIN32(ENA_MAX_BACKOFF_DELAY_EXP, exp);
555 	delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
556 	delay_us = ENA_MIN32(ENA_MAX_ADMIN_POLL_US, delay_us * (1U << exp));
557 	ENA_USLEEP(delay_us);
558 }
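
/* Worked example (illustration only): with delay_us = ENA_MIN_ADMIN_POLL_US
 * (100 us), successive calls with exp = 0, 1, 2, ... sleep for 100, 200, 400,
 * 800, 1600, 3200 and then 5000 us, since the scaled delay is clamped to
 * ENA_MAX_ADMIN_POLL_US.
 */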
559 
560 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
561 						     struct ena_com_admin_queue *admin_queue)
562 {
563 	unsigned long flags = 0;
564 	ena_time_t timeout;
565 	int ret;
566 	u32 exp = 0;
567 
568 	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
569 
570 	while (1) {
571 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
572 		ena_com_handle_admin_completion(admin_queue);
573 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
574 
575 		if (comp_ctx->status != ENA_CMD_SUBMITTED)
576 			break;
577 
578 		if (unlikely(ENA_TIME_EXPIRE(timeout))) {
579 			ena_trc_err(admin_queue->ena_dev,
580 				    "Wait for completion (polling) timeout\n");
581 			/* ENA didn't have any completion */
582 			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
583 			admin_queue->stats.no_completion++;
584 			admin_queue->running_state = false;
585 			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
586 
587 			ret = ENA_COM_TIMER_EXPIRED;
588 			goto err;
589 		}
590 
591 		ena_delay_exponential_backoff_us(exp++,
592 						 admin_queue->ena_dev->ena_min_poll_delay_us);
593 	}
594 
595 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
596 		ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
597 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
598 		admin_queue->stats.aborted_cmd++;
599 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
600 		ret = ENA_COM_NO_DEVICE;
601 		goto err;
602 	}
603 
604 	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
605 err:
606 	comp_ctxt_release(admin_queue, comp_ctx);
607 	return ret;
608 }
609 
610 /*
611  * Set the LLQ configurations of the firmware
612  *
613  * The driver provides only the enabled feature values to the device,
614  * which, in turn, checks if they are supported.
615  */
616 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
617 {
618 	struct ena_com_admin_queue *admin_queue;
619 	struct ena_admin_set_feat_cmd cmd;
620 	struct ena_admin_set_feat_resp resp;
621 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
622 	int ret;
623 
624 	memset(&cmd, 0x0, sizeof(cmd));
625 	admin_queue = &ena_dev->admin_queue;
626 
627 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
628 	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
629 
630 	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
631 	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
632 	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
633 	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
634 
635 	cmd.u.llq.accel_mode.u.set.enabled_flags =
636 		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
637 		BIT(ENA_ADMIN_LIMIT_TX_BURST);
638 
639 	ret = ena_com_execute_admin_command(admin_queue,
640 					    (struct ena_admin_aq_entry *)&cmd,
641 					    sizeof(cmd),
642 					    (struct ena_admin_acq_entry *)&resp,
643 					    sizeof(resp));
644 
645 	if (unlikely(ret))
646 		ena_trc_err(ena_dev, "Failed to set LLQ configurations: %d\n", ret);
647 
648 	return ret;
649 }
650 
651 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
652 				   struct ena_admin_feature_llq_desc *llq_features,
653 				   struct ena_llq_configurations *llq_default_cfg)
654 {
655 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
656 	struct ena_admin_accel_mode_get llq_accel_mode_get;
657 	u16 supported_feat;
658 	int rc;
659 
660 	memset(llq_info, 0, sizeof(*llq_info));
661 
662 	supported_feat = llq_features->header_location_ctrl_supported;
663 
664 	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
665 		llq_info->header_location_ctrl =
666 			llq_default_cfg->llq_header_location;
667 	} else {
668 		ena_trc_err(ena_dev, "Invalid header location control, supported: 0x%x\n",
669 			    supported_feat);
670 		return ENA_COM_INVAL;
671 	}
672 
673 	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
674 		supported_feat = llq_features->descriptors_stride_ctrl_supported;
675 		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
676 			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
677 		} else	{
678 			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
679 				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
680 			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
681 				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
682 			} else {
683 				ena_trc_err(ena_dev, "Invalid desc_stride_ctrl, supported: 0x%x\n",
684 					    supported_feat);
685 				return ENA_COM_INVAL;
686 			}
687 
688 			ena_trc_err(ena_dev, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
689 				    llq_default_cfg->llq_stride_ctrl,
690 				    supported_feat,
691 				    llq_info->desc_stride_ctrl);
692 		}
693 	} else {
694 		llq_info->desc_stride_ctrl = 0;
695 	}
696 
697 	supported_feat = llq_features->entry_size_ctrl_supported;
698 	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
699 		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
700 		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
701 	} else {
702 		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
703 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
704 			llq_info->desc_list_entry_size = 128;
705 		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
706 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
707 			llq_info->desc_list_entry_size = 192;
708 		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
709 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
710 			llq_info->desc_list_entry_size = 256;
711 		} else {
712 			ena_trc_err(ena_dev, "Invalid entry_size_ctrl, supported: 0x%x\n",
713 				    supported_feat);
714 			return ENA_COM_INVAL;
715 		}
716 
717 		ena_trc_err(ena_dev, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
718 			    llq_default_cfg->llq_ring_entry_size,
719 			    supported_feat,
720 			    llq_info->desc_list_entry_size);
721 	}
722 	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
723 	/* The desc list entry size should be a whole multiple of 8.
724 		 * This requirement comes from __iowrite64_copy()
725 		 */
726 		ena_trc_err(ena_dev, "Illegal entry size %d\n",
727 			    llq_info->desc_list_entry_size);
728 		return ENA_COM_INVAL;
729 	}
730 
731 	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
732 		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
733 			sizeof(struct ena_eth_io_tx_desc);
734 	else
735 		llq_info->descs_per_entry = 1;
736 
737 	supported_feat = llq_features->desc_num_before_header_supported;
738 	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
739 		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
740 	} else {
741 		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
742 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
743 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
744 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
745 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
746 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
747 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
748 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
749 		} else {
750 			ena_trc_err(ena_dev, "Invalid descs_num_before_header, supported: 0x%x\n",
751 				    supported_feat);
752 			return ENA_COM_INVAL;
753 		}
754 
755 		ena_trc_err(ena_dev, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
756 			    llq_default_cfg->llq_num_decs_before_header,
757 			    supported_feat,
758 			    llq_info->descs_num_before_header);
759 	}
760 	/* Check if the accelerated queue feature is supported */
761 	llq_accel_mode_get = llq_features->accel_mode.u.get;
762 
763 	llq_info->disable_meta_caching =
764 		!!(llq_accel_mode_get.supported_flags &
765 		   BIT(ENA_ADMIN_DISABLE_META_CACHING));
766 
767 	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
768 		llq_info->max_entries_in_tx_burst =
769 			llq_accel_mode_get.max_tx_burst_size /
770 			llq_default_cfg->llq_ring_entry_size_value;
771 
772 	rc = ena_com_set_llq(ena_dev);
773 	if (unlikely(rc))
774 		ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc);
775 
776 	return rc;
777 }
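
/* For illustration (assuming the 16-byte Tx descriptor layout from
 * ena_eth_io_defs.h): with a 128 B desc_list_entry_size and
 * ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY, descs_per_entry above works out to
 * 128 / sizeof(struct ena_eth_io_tx_desc) = 8 descriptors per LLQ entry.
 */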
778 
779 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
780 							struct ena_com_admin_queue *admin_queue)
781 {
782 	unsigned long flags = 0;
783 	int ret;
784 
785 	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
786 			    admin_queue->completion_timeout);
787 
788 	/* In case the command wasn't completed, find out the root cause.
789 	 * There might be two kinds of errors:
790 	 * 1) No completion (timeout reached)
791 	 * 2) There is a completion, but the driver didn't get any MSI-X interrupt.
792 	 */
793 	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
794 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
795 		ena_com_handle_admin_completion(admin_queue);
796 		admin_queue->stats.no_completion++;
797 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
798 
799 		if (comp_ctx->status == ENA_CMD_COMPLETED) {
800 			admin_queue->is_missing_admin_interrupt = true;
801 			ena_trc_err(admin_queue->ena_dev,
802 				    "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
803 				    comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
804 			/* Check if fallback to polling is enabled */
805 			if (admin_queue->auto_polling)
806 				admin_queue->polling = true;
807 		} else {
808 			ena_trc_err(admin_queue->ena_dev,
809 				    "The ena device didn't send a completion for the admin cmd %d status %d\n",
810 				    comp_ctx->cmd_opcode, comp_ctx->status);
811 		}
812 		/* Check if the driver shifted to polling mode.
813 		 * This will happen if there is a completion without an interrupt
814 		 * and autopolling mode is enabled. Continue normal execution in such a case.
815 		 */
816 		if (!admin_queue->polling) {
817 			admin_queue->running_state = false;
818 			ret = ENA_COM_TIMER_EXPIRED;
819 			goto err;
820 		}
821 	} else if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
822 		ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
823 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
824 		admin_queue->stats.aborted_cmd++;
825 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
826 		ret = ENA_COM_NO_DEVICE;
827 		goto err;
828 	}
829 
830 	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
831 err:
832 	comp_ctxt_release(admin_queue, comp_ctx);
833 	return ret;
834 }
835 
836 /* This method reads a hardware device register by posting a write
837  * and waiting for the response.
838  * On timeout the function will return ENA_MMIO_READ_TIMEOUT.
839  */
840 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
841 {
842 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
843 	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
844 		mmio_read->read_resp;
845 	u32 mmio_read_reg, ret, i;
846 	unsigned long flags = 0;
847 	u32 timeout = mmio_read->reg_read_to;
848 
849 	ENA_MIGHT_SLEEP();
850 
851 	if (timeout == 0)
852 		timeout = ENA_REG_READ_TIMEOUT;
853 
854 	/* If readless is disabled, perform regular read */
855 	if (!mmio_read->readless_supported)
856 		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
857 
858 	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
859 	mmio_read->seq_num++;
860 
861 	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
862 	mmio_read_reg = ENA_FIELD_PREP(offset,
863 				       ENA_REGS_MMIO_REG_READ_REG_OFF_MASK,
864 				       ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT);
865 	mmio_read_reg |= mmio_read->seq_num &
866 			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
867 
868 	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
869 			ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
870 
871 	for (i = 0; i < timeout; i++) {
872 		if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
873 			break;
874 
875 		ENA_UDELAY(1);
876 	}
877 
878 	if (unlikely(i == timeout)) {
879 		ena_trc_err(ena_dev, "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
880 			    mmio_read->seq_num,
881 			    offset,
882 			    read_resp->req_id,
883 			    read_resp->reg_off);
884 		ret = ENA_MMIO_READ_TIMEOUT;
885 		goto err;
886 	}
887 
888 	if (unlikely(read_resp->reg_off != offset)) {
889 		ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
890 		ret = ENA_MMIO_READ_TIMEOUT;
891 	} else {
892 		ret = read_resp->reg_val;
893 	}
894 err:
895 	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
896 
897 	return ret;
898 }
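
/* Illustrative usage of the readless read above: the driver posts the register
 * offset plus a sequence number to ENA_REGS_MMIO_REG_READ_OFF and then polls
 * the host buffer the device writes back. Callers use it like the capability
 * read further below:
 *
 *	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
 *	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT))
 *		return ENA_COM_TIMER_EXPIRED;
 */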
899 
900 /* There are two ways to wait for a completion.
901  * Polling mode - wait until the completion is available.
902  * Async mode - wait on a wait queue until the completion is ready
903  * (or the timeout expires).
904  * It is expected that the IRQ handler calls ena_com_handle_admin_completion
905  * to mark the completions.
906  */
907 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
908 					     struct ena_com_admin_queue *admin_queue)
909 {
910 	if (admin_queue->polling)
911 		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
912 								 admin_queue);
913 
914 	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
915 							    admin_queue);
916 }
917 
918 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
919 				 struct ena_com_io_sq *io_sq)
920 {
921 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
922 	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
923 	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
924 	u8 direction;
925 	int ret;
926 
927 	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
928 
929 	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
930 		direction = ENA_ADMIN_SQ_DIRECTION_TX;
931 	else
932 		direction = ENA_ADMIN_SQ_DIRECTION_RX;
933 
934 	destroy_cmd.sq.sq_identity |=
935 		ENA_FIELD_PREP(direction,
936 			       ENA_ADMIN_SQ_SQ_DIRECTION_MASK,
937 			       ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT);
938 
939 	destroy_cmd.sq.sq_idx = io_sq->idx;
940 	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
941 
942 	ret = ena_com_execute_admin_command(admin_queue,
943 					    (struct ena_admin_aq_entry *)&destroy_cmd,
944 					    sizeof(destroy_cmd),
945 					    (struct ena_admin_acq_entry *)&destroy_resp,
946 					    sizeof(destroy_resp));
947 
948 	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
949 		ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret);
950 
951 	return ret;
952 }
953 
954 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
955 				  struct ena_com_io_sq *io_sq,
956 				  struct ena_com_io_cq *io_cq)
957 {
958 	size_t size;
959 
960 	if (io_cq->cdesc_addr.virt_addr) {
961 		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
962 
963 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
964 				      size,
965 				      io_cq->cdesc_addr.virt_addr,
966 				      io_cq->cdesc_addr.phys_addr,
967 				      io_cq->cdesc_addr.mem_handle);
968 
969 		io_cq->cdesc_addr.virt_addr = NULL;
970 	}
971 
972 	if (io_sq->desc_addr.virt_addr) {
973 		size = io_sq->desc_entry_size * io_sq->q_depth;
974 
975 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
976 				      size,
977 				      io_sq->desc_addr.virt_addr,
978 				      io_sq->desc_addr.phys_addr,
979 				      io_sq->desc_addr.mem_handle);
980 
981 		io_sq->desc_addr.virt_addr = NULL;
982 	}
983 
984 	if (io_sq->bounce_buf_ctrl.base_buffer) {
985 		ENA_MEM_FREE(ena_dev->dmadev,
986 			     io_sq->bounce_buf_ctrl.base_buffer,
987 			     (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
988 		io_sq->bounce_buf_ctrl.base_buffer = NULL;
989 	}
990 }
991 
992 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
993 				u16 exp_state)
994 {
995 	u32 val, exp = 0;
996 	ena_time_t timeout_stamp;
997 
998 	/* Convert timeout from resolution of 100ms to us resolution. */
999 	timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);
1000 
1001 	while (1) {
1002 		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1003 
1004 		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
1005 			ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1006 			return ENA_COM_TIMER_EXPIRED;
1007 		}
1008 
1009 		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
1010 			exp_state)
1011 			return 0;
1012 
1013 		if (unlikely(ENA_TIME_EXPIRE(timeout_stamp)))
1014 			return ENA_COM_TIMER_EXPIRED;
1015 
1016 		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1017 	}
1018 }
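
/* For illustration: the timeout argument above is in units of 100 ms (hence
 * the conversion comment), so e.g. timeout = 5 turns into a
 * 5 * 100 * 1000 = 500000 us deadline for ENA_GET_SYSTEM_TIMEOUT().
 */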
1019 
1020 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
1021 					       enum ena_admin_aq_feature_id feature_id)
1022 {
1023 	u32 feature_mask = 1 << feature_id;
1024 
1025 	/* Device attributes are always supported */
1026 	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
1027 	    !(ena_dev->supported_features & feature_mask))
1028 		return false;
1029 
1030 	return true;
1031 }
1032 
1033 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
1034 				  struct ena_admin_get_feat_resp *get_resp,
1035 				  enum ena_admin_aq_feature_id feature_id,
1036 				  dma_addr_t control_buf_dma_addr,
1037 				  u32 control_buff_size,
1038 				  u8 feature_ver)
1039 {
1040 	struct ena_com_admin_queue *admin_queue;
1041 	struct ena_admin_get_feat_cmd get_cmd;
1042 	int ret;
1043 
1044 	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
1045 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id);
1046 		return ENA_COM_UNSUPPORTED;
1047 	}
1048 
1049 	memset(&get_cmd, 0x0, sizeof(get_cmd));
1050 	admin_queue = &ena_dev->admin_queue;
1051 
1052 	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1053 
1054 	if (control_buff_size)
1055 		get_cmd.aq_common_descriptor.flags =
1056 			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1057 	else
1058 		get_cmd.aq_common_descriptor.flags = 0;
1059 
1060 	ret = ena_com_mem_addr_set(ena_dev,
1061 				   &get_cmd.control_buffer.address,
1062 				   control_buf_dma_addr);
1063 	if (unlikely(ret)) {
1064 		ena_trc_err(ena_dev, "Memory address set failed\n");
1065 		return ret;
1066 	}
1067 
1068 	get_cmd.control_buffer.length = control_buff_size;
1069 	get_cmd.feat_common.feature_version = feature_ver;
1070 	get_cmd.feat_common.feature_id = feature_id;
1071 
1072 	ret = ena_com_execute_admin_command(admin_queue,
1073 					    (struct ena_admin_aq_entry *)
1074 					    &get_cmd,
1075 					    sizeof(get_cmd),
1076 					    (struct ena_admin_acq_entry *)
1077 					    get_resp,
1078 					    sizeof(*get_resp));
1079 
1080 	if (unlikely(ret))
1081 		ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n",
1082 			    feature_id, ret);
1083 
1084 	return ret;
1085 }
1086 
1087 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1088 			       struct ena_admin_get_feat_resp *get_resp,
1089 			       enum ena_admin_aq_feature_id feature_id,
1090 			       u8 feature_ver)
1091 {
1092 	return ena_com_get_feature_ex(ena_dev,
1093 				      get_resp,
1094 				      feature_id,
1095 				      0,
1096 				      0,
1097 				      feature_ver);
1098 }
1099 
1100 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1101 {
1102 	return ena_dev->rss.hash_func;
1103 }
1104 
1105 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1106 {
1107 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
1108 		(ena_dev->rss).hash_key;
1109 
1110 	ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
1111 	/* The key buffer is stored in the device in an array of
1112 	 * uint32 elements.
1113 	 */
1114 	hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
1115 }
1116 
1117 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1118 {
1119 	struct ena_rss *rss = &ena_dev->rss;
1120 
1121 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
1122 		return ENA_COM_UNSUPPORTED;
1123 
1124 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1125 			       sizeof(*rss->hash_key),
1126 			       rss->hash_key,
1127 			       rss->hash_key_dma_addr,
1128 			       rss->hash_key_mem_handle);
1129 
1130 	if (unlikely(!rss->hash_key))
1131 		return ENA_COM_NO_MEM;
1132 
1133 	return 0;
1134 }
1135 
1136 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1137 {
1138 	struct ena_rss *rss = &ena_dev->rss;
1139 
1140 	if (rss->hash_key)
1141 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1142 				      sizeof(*rss->hash_key),
1143 				      rss->hash_key,
1144 				      rss->hash_key_dma_addr,
1145 				      rss->hash_key_mem_handle);
1146 	rss->hash_key = NULL;
1147 }
1148 
1149 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1150 {
1151 	struct ena_rss *rss = &ena_dev->rss;
1152 
1153 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1154 			       sizeof(*rss->hash_ctrl),
1155 			       rss->hash_ctrl,
1156 			       rss->hash_ctrl_dma_addr,
1157 			       rss->hash_ctrl_mem_handle);
1158 
1159 	if (unlikely(!rss->hash_ctrl))
1160 		return ENA_COM_NO_MEM;
1161 
1162 	return 0;
1163 }
1164 
1165 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1166 {
1167 	struct ena_rss *rss = &ena_dev->rss;
1168 
1169 	if (rss->hash_ctrl)
1170 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1171 				      sizeof(*rss->hash_ctrl),
1172 				      rss->hash_ctrl,
1173 				      rss->hash_ctrl_dma_addr,
1174 				      rss->hash_ctrl_mem_handle);
1175 	rss->hash_ctrl = NULL;
1176 }
1177 
1178 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1179 					   u16 log_size)
1180 {
1181 	struct ena_rss *rss = &ena_dev->rss;
1182 	struct ena_admin_get_feat_resp get_resp;
1183 	size_t tbl_size;
1184 	int ret;
1185 
1186 	ret = ena_com_get_feature(ena_dev, &get_resp,
1187 				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
1188 	if (unlikely(ret))
1189 		return ret;
1190 
1191 	if ((get_resp.u.ind_table.min_size > log_size) ||
1192 	    (get_resp.u.ind_table.max_size < log_size)) {
1193 		ena_trc_err(ena_dev, "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1194 			    1 << log_size,
1195 			    1 << get_resp.u.ind_table.min_size,
1196 			    1 << get_resp.u.ind_table.max_size);
1197 		return ENA_COM_INVAL;
1198 	}
1199 
1200 	tbl_size = (1ULL << log_size) *
1201 		sizeof(struct ena_admin_rss_ind_table_entry);
1202 
1203 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1204 			     tbl_size,
1205 			     rss->rss_ind_tbl,
1206 			     rss->rss_ind_tbl_dma_addr,
1207 			     rss->rss_ind_tbl_mem_handle);
1208 	if (unlikely(!rss->rss_ind_tbl))
1209 		goto mem_err1;
1210 
1211 	tbl_size = (1ULL << log_size) * sizeof(u16);
1212 	rss->host_rss_ind_tbl =
1213 		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
1214 	if (unlikely(!rss->host_rss_ind_tbl))
1215 		goto mem_err2;
1216 
1217 	rss->tbl_log_size = log_size;
1218 
1219 	return 0;
1220 
1221 mem_err2:
1222 	tbl_size = (1ULL << log_size) *
1223 		sizeof(struct ena_admin_rss_ind_table_entry);
1224 
1225 	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1226 			      tbl_size,
1227 			      rss->rss_ind_tbl,
1228 			      rss->rss_ind_tbl_dma_addr,
1229 			      rss->rss_ind_tbl_mem_handle);
1230 	rss->rss_ind_tbl = NULL;
1231 mem_err1:
1232 	rss->tbl_log_size = 0;
1233 	return ENA_COM_NO_MEM;
1234 }
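
/* Worked example (illustration only): for log_size = 7 the function above
 * allocates a 128-entry device table of
 * 128 * sizeof(struct ena_admin_rss_ind_table_entry) bytes plus a
 * 128 * sizeof(u16) byte host shadow table (host_rss_ind_tbl).
 */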
1235 
1236 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1237 {
1238 	struct ena_rss *rss = &ena_dev->rss;
1239 	size_t tbl_size = (1ULL << rss->tbl_log_size) *
1240 		sizeof(struct ena_admin_rss_ind_table_entry);
1241 
1242 	if (rss->rss_ind_tbl)
1243 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1244 				      tbl_size,
1245 				      rss->rss_ind_tbl,
1246 				      rss->rss_ind_tbl_dma_addr,
1247 				      rss->rss_ind_tbl_mem_handle);
1248 	rss->rss_ind_tbl = NULL;
1249 
1250 	if (rss->host_rss_ind_tbl)
1251 		ENA_MEM_FREE(ena_dev->dmadev,
1252 			     rss->host_rss_ind_tbl,
1253 			     ((1ULL << rss->tbl_log_size) * sizeof(u16)));
1254 	rss->host_rss_ind_tbl = NULL;
1255 }
1256 
1257 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1258 				struct ena_com_io_sq *io_sq, u16 cq_idx)
1259 {
1260 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1261 	struct ena_admin_aq_create_sq_cmd create_cmd;
1262 	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1263 	u8 direction;
1264 	int ret;
1265 
1266 	memset(&create_cmd, 0x0, sizeof(create_cmd));
1267 
1268 	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1269 
1270 	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1271 		direction = ENA_ADMIN_SQ_DIRECTION_TX;
1272 	else
1273 		direction = ENA_ADMIN_SQ_DIRECTION_RX;
1274 
1275 	create_cmd.sq_identity |=
1276 		ENA_FIELD_PREP(direction,
1277 			       ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK,
1278 			       ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT);
1279 
1280 	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1281 		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1282 
1283 	create_cmd.sq_caps_2 |=
1284 		ENA_FIELD_PREP(ENA_ADMIN_COMPLETION_POLICY_DESC,
1285 			       ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK,
1286 			       ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT);
1287 
1288 	create_cmd.sq_caps_3 |=
1289 		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1290 
1291 	create_cmd.cq_idx = cq_idx;
1292 	create_cmd.sq_depth = io_sq->q_depth;
1293 
1294 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1295 		ret = ena_com_mem_addr_set(ena_dev,
1296 					   &create_cmd.sq_ba,
1297 					   io_sq->desc_addr.phys_addr);
1298 		if (unlikely(ret)) {
1299 			ena_trc_err(ena_dev, "Memory address set failed\n");
1300 			return ret;
1301 		}
1302 	}
1303 
1304 	ret = ena_com_execute_admin_command(admin_queue,
1305 					    (struct ena_admin_aq_entry *)&create_cmd,
1306 					    sizeof(create_cmd),
1307 					    (struct ena_admin_acq_entry *)&cmd_completion,
1308 					    sizeof(cmd_completion));
1309 	if (unlikely(ret)) {
1310 		ena_trc_err(ena_dev, "Failed to create IO SQ. error: %d\n", ret);
1311 		return ret;
1312 	}
1313 
1314 	io_sq->idx = cmd_completion.sq_idx;
1315 
1316 	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1317 		(uintptr_t)cmd_completion.sq_doorbell_offset);
1318 
1319 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1320 		io_sq->desc_addr.pbuf_dev_addr =
1321 			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1322 			cmd_completion.llq_descriptors_offset);
1323 	}
1324 
1325 	ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1326 
1327 	return ret;
1328 }
1329 
1330 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1331 {
1332 	struct ena_rss *rss = &ena_dev->rss;
1333 	struct ena_com_io_sq *io_sq;
1334 	u16 qid;
1335 	int i;
1336 
1337 	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1338 		qid = rss->host_rss_ind_tbl[i];
1339 		if (qid >= ENA_TOTAL_NUM_QUEUES)
1340 			return ENA_COM_INVAL;
1341 
1342 		io_sq = &ena_dev->io_sq_queues[qid];
1343 
1344 		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1345 			return ENA_COM_INVAL;
1346 
1347 		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1348 	}
1349 
1350 	return 0;
1351 }
1352 
1353 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1354 						 u16 intr_delay_resolution)
1355 {
1356 	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1357 
1358 	if (unlikely(!intr_delay_resolution)) {
1359 		ena_trc_err(ena_dev, "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1360 		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1361 	}
1362 
1363 	/* update Rx */
1364 	ena_dev->intr_moder_rx_interval =
1365 		ena_dev->intr_moder_rx_interval *
1366 		prev_intr_delay_resolution /
1367 		intr_delay_resolution;
1368 
1369 	/* update Tx */
1370 	ena_dev->intr_moder_tx_interval =
1371 		ena_dev->intr_moder_tx_interval *
1372 		prev_intr_delay_resolution /
1373 		intr_delay_resolution;
1374 
1375 	ena_dev->intr_delay_resolution = intr_delay_resolution;
1376 }
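
/* For illustration: the moderation intervals are kept in units of the device's
 * delay resolution. If the resolution changes from 1 us to 4 us, a stored Rx
 * interval of 64 is rescaled above to 64 * 1 / 4 = 16, which still represents
 * the same 64 us of interrupt delay.
 */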
1377 
1378 /*****************************************************************************/
1379 /*******************************      API       ******************************/
1380 /*****************************************************************************/
1381 
1382 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1383 				  struct ena_admin_aq_entry *cmd,
1384 				  size_t cmd_size,
1385 				  struct ena_admin_acq_entry *comp,
1386 				  size_t comp_size)
1387 {
1388 	struct ena_comp_ctx *comp_ctx;
1389 	int ret;
1390 
1391 	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1392 					    comp, comp_size);
1393 	if (IS_ERR(comp_ctx)) {
1394 		ret = PTR_ERR(comp_ctx);
1395 		if (ret != ENA_COM_NO_DEVICE)
1396 			ena_trc_err(admin_queue->ena_dev,
1397 				    "Failed to submit command [%d]\n",
1398 				    ret);
1399 
1400 		return ret;
1401 	}
1402 
1403 	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1404 	if (unlikely(ret)) {
1405 		if (admin_queue->running_state)
1406 			ena_trc_err(admin_queue->ena_dev,
1407 				    "Failed to process command [%d]\n",
1408 				    ret);
1409 	}
1410 	return ret;
1411 }
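
/* Illustrative sketch of the pattern used by the admin commands in this file:
 * build the command on the stack, zero it, fill in the opcode and payload and
 * hand it to the generic entry points with the usual casts, e.g. (as in
 * ena_com_set_llq() above):
 *
 *	memset(&cmd, 0x0, sizeof(cmd));
 *	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 *	ret = ena_com_execute_admin_command(admin_queue,
 *					    (struct ena_admin_aq_entry *)&cmd,
 *					    sizeof(cmd),
 *					    (struct ena_admin_acq_entry *)&resp,
 *					    sizeof(resp));
 */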
1412 
1413 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1414 			 struct ena_com_io_cq *io_cq)
1415 {
1416 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1417 	struct ena_admin_aq_create_cq_cmd create_cmd;
1418 	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1419 	int ret;
1420 
1421 	memset(&create_cmd, 0x0, sizeof(create_cmd));
1422 
1423 	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1424 
1425 	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1426 		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1427 	create_cmd.cq_caps_1 |=
1428 		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1429 
1430 	create_cmd.msix_vector = io_cq->msix_vector;
1431 	create_cmd.cq_depth = io_cq->q_depth;
1432 
1433 	ret = ena_com_mem_addr_set(ena_dev,
1434 				   &create_cmd.cq_ba,
1435 				   io_cq->cdesc_addr.phys_addr);
1436 	if (unlikely(ret)) {
1437 		ena_trc_err(ena_dev, "Memory address set failed\n");
1438 		return ret;
1439 	}
1440 
1441 	ret = ena_com_execute_admin_command(admin_queue,
1442 					    (struct ena_admin_aq_entry *)&create_cmd,
1443 					    sizeof(create_cmd),
1444 					    (struct ena_admin_acq_entry *)&cmd_completion,
1445 					    sizeof(cmd_completion));
1446 	if (unlikely(ret)) {
1447 		ena_trc_err(ena_dev, "Failed to create IO CQ. error: %d\n", ret);
1448 		return ret;
1449 	}
1450 
1451 	io_cq->idx = cmd_completion.cq_idx;
1452 
1453 	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1454 		cmd_completion.cq_interrupt_unmask_register_offset);
1455 
1456 	if (cmd_completion.numa_node_register_offset)
1457 		io_cq->numa_node_cfg_reg =
1458 			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1459 			cmd_completion.numa_node_register_offset);
1460 
1461 	ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1462 
1463 	return ret;
1464 }
1465 
1466 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1467 			    struct ena_com_io_sq **io_sq,
1468 			    struct ena_com_io_cq **io_cq)
1469 {
1470 	if (unlikely(qid >= ENA_TOTAL_NUM_QUEUES)) {
1471 		ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n",
1472 			    qid, ENA_TOTAL_NUM_QUEUES);
1473 		return ENA_COM_INVAL;
1474 	}
1475 
1476 	*io_sq = &ena_dev->io_sq_queues[qid];
1477 	*io_cq = &ena_dev->io_cq_queues[qid];
1478 
1479 	return 0;
1480 }
1481 
1482 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1483 {
1484 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1485 	struct ena_comp_ctx *comp_ctx;
1486 	u16 i;
1487 
1488 	if (!admin_queue->comp_ctx)
1489 		return;
1490 
1491 	for (i = 0; i < admin_queue->q_depth; i++) {
1492 		comp_ctx = get_comp_ctxt(admin_queue, i, false);
1493 		if (unlikely(!comp_ctx))
1494 			break;
1495 
1496 		comp_ctx->status = ENA_CMD_ABORTED;
1497 
1498 		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
1499 	}
1500 }
1501 
1502 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1503 {
1504 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1505 	unsigned long flags = 0;
1506 	u32 exp = 0;
1507 
1508 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1509 	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
1510 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1511 		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1512 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1513 	}
1514 	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1515 }
1516 
1517 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1518 			  struct ena_com_io_cq *io_cq)
1519 {
1520 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1521 	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1522 	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1523 	int ret;
1524 
1525 	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1526 
1527 	destroy_cmd.cq_idx = io_cq->idx;
1528 	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1529 
1530 	ret = ena_com_execute_admin_command(admin_queue,
1531 					    (struct ena_admin_aq_entry *)&destroy_cmd,
1532 					    sizeof(destroy_cmd),
1533 					    (struct ena_admin_acq_entry *)&destroy_resp,
1534 					    sizeof(destroy_resp));
1535 
1536 	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
1537 		ena_trc_err(ena_dev, "Failed to destroy IO CQ. error: %d\n", ret);
1538 
1539 	return ret;
1540 }
1541 
1542 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1543 {
1544 	return ena_dev->admin_queue.running_state;
1545 }
1546 
1547 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1548 {
1549 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1550 	unsigned long flags = 0;
1551 
1552 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1553 	ena_dev->admin_queue.running_state = state;
1554 	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1555 }
1556 
1557 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1558 {
1559 	u16 depth = ena_dev->aenq.q_depth;
1560 
1561 	ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n");
1562 
1563 	/* Init head_db to mark that all entries in the queue
1564 	 * are initially available
1565 	 */
1566 	ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1567 }
1568 
1569 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1570 {
1571 	struct ena_com_admin_queue *admin_queue;
1572 	struct ena_admin_set_feat_cmd cmd;
1573 	struct ena_admin_set_feat_resp resp;
1574 	struct ena_admin_get_feat_resp get_resp;
1575 	int ret;
1576 
1577 	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1578 	if (unlikely(ret)) {
1579 		ena_trc_info(ena_dev, "Can't get aenq configuration\n");
1580 		return ret;
1581 	}
1582 
1583 	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1584 		ena_trc_warn(ena_dev, "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1585 			     get_resp.u.aenq.supported_groups,
1586 			     groups_flag);
1587 		return ENA_COM_UNSUPPORTED;
1588 	}
1589 
1590 	memset(&cmd, 0x0, sizeof(cmd));
1591 	admin_queue = &ena_dev->admin_queue;
1592 
1593 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1594 	cmd.aq_common_descriptor.flags = 0;
1595 	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1596 	cmd.u.aenq.enabled_groups = groups_flag;
1597 
1598 	ret = ena_com_execute_admin_command(admin_queue,
1599 					    (struct ena_admin_aq_entry *)&cmd,
1600 					    sizeof(cmd),
1601 					    (struct ena_admin_acq_entry *)&resp,
1602 					    sizeof(resp));
1603 
1604 	if (unlikely(ret))
1605 		ena_trc_err(ena_dev, "Failed to config AENQ ret: %d\n", ret);
1606 
1607 	return ret;
1608 }
1609 
1610 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1611 {
1612 	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1613 	u32 width;
1614 
1615 	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1616 		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1617 		return ENA_COM_TIMER_EXPIRED;
1618 	}
1619 
1620 	width = ENA_FIELD_GET(caps,
1621 			      ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK,
1622 			      ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT);
1623 
1624 	ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width);
1625 
1626 	if (unlikely(width < 32 || width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
1627 		ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width);
1628 		return ENA_COM_INVAL;
1629 	}
1630 
1631 	ena_dev->dma_addr_bits = width;
1632 
1633 	return width;
1634 }
1635 
1636 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1637 {
1638 	u32 ver;
1639 	u32 ctrl_ver;
1640 	u32 ctrl_ver_masked;
1641 
1642 	/* Make sure the ENA version and the controller version are at least
1643 	 * as high as the driver expects
1644 	 */
1645 	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1646 	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1647 					  ENA_REGS_CONTROLLER_VERSION_OFF);
1648 
1649 	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1650 		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1651 		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1652 		return ENA_COM_TIMER_EXPIRED;
1653 	}
1654 
1655 	ena_trc_info(ena_dev, "ENA device version: %d.%d\n",
1656 		     ENA_FIELD_GET(ver,
1657 				   ENA_REGS_VERSION_MAJOR_VERSION_MASK,
1658 				   ENA_REGS_VERSION_MAJOR_VERSION_SHIFT),
1659 		     ENA_FIELD_GET(ver,
1660 				   ENA_REGS_VERSION_MINOR_VERSION_MASK,
1661 				   ENA_ZERO_SHIFT));
1662 
1663 	ena_trc_info(ena_dev, "ENA controller version: %d.%d.%d implementation version %d\n",
1664 		     ENA_FIELD_GET(ctrl_ver,
1665 				   ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK,
1666 				   ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT),
1667 		     ENA_FIELD_GET(ctrl_ver,
1668 				   ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK,
1669 				   ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT),
1670 		     ENA_FIELD_GET(ctrl_ver,
1671 				   ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK,
1672 				   ENA_ZERO_SHIFT),
1673 		     ENA_FIELD_GET(ctrl_ver,
1674 				   ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK,
1675 				   ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT));
1676 
1677 	ctrl_ver_masked =
1678 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1679 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1680 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1681 
1682 	/* Validate the ctrl version without the implementation ID */
1683 	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1684 		ena_trc_err(ena_dev, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1685 		return -1;
1686 	}
1687 
1688 	return 0;
1689 }
1690 
1691 static void
1692 ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
1693 				      struct ena_com_admin_queue *admin_queue)
1694 
1695 {
1696 	if (!admin_queue->comp_ctx)
1697 		return;
1698 
1699 	ENA_WAIT_EVENTS_DESTROY(admin_queue);
1700 	ENA_MEM_FREE(ena_dev->dmadev,
1701 		     admin_queue->comp_ctx,
1702 		     (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
1703 
1704 	admin_queue->comp_ctx = NULL;
1705 }
1706 
1707 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1708 {
1709 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1710 	struct ena_com_admin_cq *cq = &admin_queue->cq;
1711 	struct ena_com_admin_sq *sq = &admin_queue->sq;
1712 	struct ena_com_aenq *aenq = &ena_dev->aenq;
1713 	u16 size;
1714 
1715 	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
1716 
1717 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1718 	if (sq->entries)
1719 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
1720 				      sq->dma_addr, sq->mem_handle);
1721 	sq->entries = NULL;
1722 
1723 	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1724 	if (cq->entries)
1725 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
1726 				      cq->dma_addr, cq->mem_handle);
1727 	cq->entries = NULL;
1728 
1729 	size = ADMIN_AENQ_SIZE(aenq->q_depth);
1730 	if (ena_dev->aenq.entries)
1731 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
1732 				      aenq->dma_addr, aenq->mem_handle);
1733 	aenq->entries = NULL;
1734 	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
1735 }
1736 
1737 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1738 {
1739 	u32 mask_value = 0;
1740 
1741 	if (polling)
1742 		mask_value = ENA_REGS_ADMIN_INTR_MASK;
1743 
1744 	ENA_REG_WRITE32(ena_dev->bus, mask_value,
1745 			ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1746 	ena_dev->admin_queue.polling = polling;
1747 }
1748 
1749 bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
1750 {
1751 	return ena_dev->admin_queue.polling;
1752 }
1753 
1754 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1755 					 bool polling)
1756 {
1757 	ena_dev->admin_queue.auto_polling = polling;
1758 }
1759 
1760 bool ena_com_phc_supported(struct ena_com_dev *ena_dev)
1761 {
1762 	return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_PHC_CONFIG);
1763 }
1764 
1765 int ena_com_phc_init(struct ena_com_dev *ena_dev)
1766 {
1767 	struct ena_com_phc_info *phc = &ena_dev->phc;
1768 
1769 	memset(phc, 0x0, sizeof(*phc));
1770 
1771 	/* Allocate shared mem used PHC timestamp retrieved from device */
1772 	/* Allocate shared memory used for the PHC timestamp retrieved from the device */
1773 			       sizeof(*phc->virt_addr),
1774 			       phc->virt_addr,
1775 			       phc->phys_addr,
1776 			       phc->mem_handle);
1777 	if (unlikely(!phc->virt_addr))
1778 		return ENA_COM_NO_MEM;
1779 
1780 	ENA_SPINLOCK_INIT(phc->lock);
1781 
1782 	phc->virt_addr->req_id = 0;
1783 	phc->virt_addr->timestamp = 0;
1784 
1785 	return 0;
1786 }
1787 
1788 int ena_com_phc_config(struct ena_com_dev *ena_dev)
1789 {
1790 	struct ena_com_phc_info *phc = &ena_dev->phc;
1791 	struct ena_admin_get_feat_resp get_feat_resp;
1792 	struct ena_admin_set_feat_resp set_feat_resp;
1793 	struct ena_admin_set_feat_cmd set_feat_cmd;
1794 	int ret = 0;
1795 
1796 	/* Get default device PHC configuration */
1797 	ret = ena_com_get_feature(ena_dev,
1798 				  &get_feat_resp,
1799 				  ENA_ADMIN_PHC_CONFIG,
1800 				  ENA_ADMIN_PHC_FEATURE_VERSION_0);
1801 	if (unlikely(ret)) {
1802 		ena_trc_err(ena_dev, "Failed to get PHC feature configuration, error: %d\n", ret);
1803 		return ret;
1804 	}
1805 
1806 	/* Supporting only PHC V0 (readless mode with error bound) */
1807 	if (get_feat_resp.u.phc.version != ENA_ADMIN_PHC_FEATURE_VERSION_0) {
1808 		ena_trc_err(ena_dev, "Unsupported PHC version (0x%X), error: %d\n",
1809 			    get_feat_resp.u.phc.version,
1810 			    ENA_COM_UNSUPPORTED);
1811 		return ENA_COM_UNSUPPORTED;
1812 	}
1813 
1814 	/* Update PHC doorbell offset according to device value, used to write req_id to PHC bar */
1815 	phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset;
1816 
1817 	/* Update PHC expire timeout according to device or default driver value */
1818 	phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ?
1819 				    get_feat_resp.u.phc.expire_timeout_usec :
1820 				    ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC;
1821 
1822 	/* Update PHC block timeout according to device or default driver value */
1823 	phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ?
1824 				   get_feat_resp.u.phc.block_timeout_usec :
1825 				   ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC;
1826 
1827 	/* Sanity check - expire timeout must not exceed block timeout */
1828 	if (phc->expire_timeout_usec > phc->block_timeout_usec)
1829 		phc->expire_timeout_usec = phc->block_timeout_usec;
1830 
1831 	/* Prepare PHC config feature command */
1832 	memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd));
1833 	set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1834 	set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG;
1835 	set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr);
1836 	ret = ena_com_mem_addr_set(ena_dev, &set_feat_cmd.u.phc.output_address, phc->phys_addr);
1837 	if (unlikely(ret)) {
1838 		ena_trc_err(ena_dev, "Failed setting PHC output address, error: %d\n", ret);
1839 		return ret;
1840 	}
1841 
1842 	/* Send PHC feature command to the device */
1843 	ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
1844 					    (struct ena_admin_aq_entry *)&set_feat_cmd,
1845 					    sizeof(set_feat_cmd),
1846 					    (struct ena_admin_acq_entry *)&set_feat_resp,
1847 					    sizeof(set_feat_resp));
1848 
1849 	if (unlikely(ret)) {
1850 		ena_trc_err(ena_dev, "Failed to enable PHC, error: %d\n", ret);
1851 		return ret;
1852 	}
1853 
1854 	phc->active = true;
1855 	ena_trc_dbg(ena_dev, "PHC is active in the device\n");
1856 
1857 	return ret;
1858 }
1859 
1860 void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
1861 {
1862 	struct ena_com_phc_info *phc = &ena_dev->phc;
1863 	unsigned long flags = 0;
1864 
1865 	/* In case PHC is not supported by the device, silently exiting */
1866 	/* In case PHC is not supported by the device, exit silently */
1867 		return;
1868 
1869 	ENA_SPINLOCK_LOCK(phc->lock, flags);
1870 	phc->active = false;
1871 	ENA_SPINLOCK_UNLOCK(phc->lock, flags);
1872 
1873 	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1874 			      sizeof(*phc->virt_addr),
1875 			      phc->virt_addr,
1876 			      phc->phys_addr,
1877 			      phc->mem_handle);
1878 	phc->virt_addr = NULL;
1879 
1880 	ENA_SPINLOCK_DESTROY(phc->lock);
1881 }
1882 
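/* Note on the readless PHC flow implemented below:
 * 1. If a previous request timed out or returned error flags, the PHC stays
 *    "blocked" until block_timeout_usec elapses; requests issued meanwhile
 *    fail with ENA_COM_DEVICE_BUSY.
 * 2. Otherwise the driver bumps req_id, seeds the shared response buffer with
 *    req_id + ENA_PHC_REQ_ID_OFFSET and writes req_id to the PHC doorbell.
 * 3. It then polls the shared buffer until the device echoes req_id back, at
 *    which point timestamp, error_bound and error_flags are valid.
 *
 * A caller might use it roughly as follows (illustrative sketch only):
 *
 *	u64 ts;
 *
 *	if (ena_com_phc_get_timestamp(ena_dev, &ts) == ENA_COM_OK)
 *		consume(ts);	// hypothetical consumer of the timestamp
 */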
1883 int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp)
1884 {
1885 	volatile struct ena_admin_phc_resp *read_resp = ena_dev->phc.virt_addr;
1886 	const ena_time_high_res_t zero_system_time = ENA_TIME_INIT_HIGH_RES();
1887 	struct ena_com_phc_info *phc = &ena_dev->phc;
1888 	ena_time_high_res_t expire_time;
1889 	ena_time_high_res_t block_time;
1890 	unsigned long flags = 0;
1891 	int ret = ENA_COM_OK;
1892 
1893 	if (!phc->active) {
1894 		ena_trc_err(ena_dev, "PHC feature is not active in the device\n");
1895 		return ENA_COM_UNSUPPORTED;
1896 	}
1897 
1898 	ENA_SPINLOCK_LOCK(phc->lock, flags);
1899 
1900 	/* Check if PHC is in blocked state */
1901 	if (unlikely(ENA_TIME_COMPARE_HIGH_RES(phc->system_time, zero_system_time))) {
1902 		/* Check if blocking time expired */
1903 		block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time,
1904 							     phc->block_timeout_usec);
1905 		if (!ENA_TIME_EXPIRE_HIGH_RES(block_time)) {
1906 			/* PHC is still in blocked state, skip PHC request */
1907 			phc->stats.phc_skp++;
1908 			ret = ENA_COM_DEVICE_BUSY;
1909 			goto skip;
1910 		}
1911 
1912 		/* PHC is in active state, update statistics according to req_id and error_flags */
1913 		if ((READ_ONCE16(read_resp->req_id) != phc->req_id) ||
1914 		    (read_resp->error_flags & ENA_PHC_ERROR_FLAGS))
1915 			/* Device didn't update req_id during blocking time or timestamp is invalid,
1916 			 * which indicates a device error
1917 			 */
1918 			phc->stats.phc_err++;
1919 		else
1920 			/* Device updated req_id during blocking time with valid timestamp */
1921 			phc->stats.phc_exp++;
1922 	}
1923 
1924 	/* Setting relative timeouts */
1925 	phc->system_time = ENA_GET_SYSTEM_TIME_HIGH_RES();
1926 	block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time, phc->block_timeout_usec);
1927 	expire_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time, phc->expire_timeout_usec);
1928 
1929 	/* We expect the device to return this req_id once the new PHC timestamp is updated */
1930 	phc->req_id++;
1931 
1932 	/* Initialize PHC shared memory with a different req_id value so the driver can detect
1933 	 * when the device updates it to the expected req_id
1934 	 */
1935 	read_resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET;
1936 
1937 	/* Writing req_id to PHC bar */
1938 	ENA_REG_WRITE32(ena_dev->bus, phc->req_id, ena_dev->reg_bar + phc->doorbell_offset);
1939 
1940 	/* Stalling until the device updates req_id */
1941 	while (1) {
1942 		if (unlikely(ENA_TIME_EXPIRE_HIGH_RES(expire_time))) {
1943 			/* Gave up waiting for an updated req_id; PHC enters the blocked state
1944 			 * until the blocking time passes. During this time any PHC timestamp or
1945 			 * error bound request will fail with a device busy error
1946 			 */
1947 			phc->error_bound = ENA_PHC_MAX_ERROR_BOUND;
1948 			ret = ENA_COM_DEVICE_BUSY;
1949 			break;
1950 		}
1951 
1952 		/* Check if req_id was updated by the device */
1953 		if (READ_ONCE16(read_resp->req_id) != phc->req_id) {
1954 			/* req_id was not updated by the device yet, check again on next loop */
1955 			continue;
1956 		}
1957 
1958 		/* req_id was updated by the device which indicates that PHC timestamp, error_bound
1959 		 * and error_flags are updated too, checking errors before retrieving timestamp and
1960 		 * error_bound values
1961 		 */
1962 		if (unlikely(read_resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
1963 			/* The device reported a timestamp or error bound error; PHC enters the
1964 			 * blocked state until the blocking time passes. During this time any PHC
1965 			 * timestamp or error bound request will fail with a device busy error
1966 			 */
1967 			phc->error_bound = ENA_PHC_MAX_ERROR_BOUND;
1968 			ret = ENA_COM_DEVICE_BUSY;
1969 			break;
1970 		}
1971 
1972 		/* PHC timestamp value is returned to the caller */
1973 		*timestamp = read_resp->timestamp;
1974 
1975 		/* Error bound value is cached for future retrieval by caller */
1976 		phc->error_bound = read_resp->error_bound;
1977 
1978 		/* Update statistic on valid PHC timestamp retrieval */
1979 		phc->stats.phc_cnt++;
1980 
1981 		/* This indicates PHC state is active */
1982 		phc->system_time = zero_system_time;
1983 		break;
1984 	}
1985 
1986 skip:
1987 	ENA_SPINLOCK_UNLOCK(phc->lock, flags);
1988 
1989 	return ret;
1990 }
1991 
1992 int ena_com_phc_get_error_bound(struct ena_com_dev *ena_dev, u32 *error_bound)
1993 {
1994 	struct ena_com_phc_info *phc = &ena_dev->phc;
1995 	u32 local_error_bound = phc->error_bound;
1996 
1997 	if (!phc->active) {
1998 		ena_trc_err(ena_dev, "PHC feature is not active in the device\n");
1999 		return ENA_COM_UNSUPPORTED;
2000 	}
2001 
2002 	if (local_error_bound == ENA_PHC_MAX_ERROR_BOUND)
2003 		return ENA_COM_DEVICE_BUSY;
2004 
2005 	*error_bound = local_error_bound;
2006 
2007 	return ENA_COM_OK;
2008 }
2009 
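/* Note: the function below allocates the DMA-coherent buffer that the device
 * fills on readless register reads, programs its address into the
 * MMIO_RESP_LO/HI registers and resets the request sequence number. On
 * allocation failure the spinlock is destroyed again and ENA_COM_NO_MEM is
 * returned.
 */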
2010 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
2011 {
2012 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2013 
2014 	ENA_SPINLOCK_INIT(mmio_read->lock);
2015 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2016 			       sizeof(*mmio_read->read_resp),
2017 			       mmio_read->read_resp,
2018 			       mmio_read->read_resp_dma_addr,
2019 			       mmio_read->read_resp_mem_handle);
2020 	if (unlikely(!mmio_read->read_resp))
2021 		goto err;
2022 
2023 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2024 
2025 	mmio_read->read_resp->req_id = 0x0;
2026 	mmio_read->seq_num = 0x0;
2027 	mmio_read->readless_supported = true;
2028 
2029 	return 0;
2030 
2031 err:
2032 	ENA_SPINLOCK_DESTROY(mmio_read->lock);
2033 	return ENA_COM_NO_MEM;
2034 }
2035 
2036 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
2037 {
2038 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2039 
2040 	mmio_read->readless_supported = readless_supported;
2041 }
2042 
2043 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
2044 {
2045 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2046 
2047 	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
2048 	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
2049 
2050 	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2051 			      sizeof(*mmio_read->read_resp),
2052 			      mmio_read->read_resp,
2053 			      mmio_read->read_resp_dma_addr,
2054 			      mmio_read->read_resp_mem_handle);
2055 
2056 	mmio_read->read_resp = NULL;
2057 	ENA_SPINLOCK_DESTROY(mmio_read->lock);
2058 }
2059 
2060 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
2061 {
2062 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2063 	u32 addr_low, addr_high;
2064 
2065 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
2066 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
2067 
2068 	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
2069 	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
2070 }
2071 
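/* Note on the admin-queue bring-up sequence below:
 * - verify the device reports READY in the status register,
 * - allocate the completion contexts, admin SQ and admin CQ,
 * - program the SQ/CQ base addresses and the AQ/ACQ caps registers
 *   (queue depth plus entry size),
 * - finally initialize the AENQ with the caller-supplied handlers.
 * Any failure rolls everything back through ena_com_admin_destroy().
 */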
2072 int ena_com_admin_init(struct ena_com_dev *ena_dev,
2073 		       struct ena_aenq_handlers *aenq_handlers)
2074 {
2075 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2076 	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
2077 	int ret;
2078 
2079 	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2080 
2081 	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
2082 		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
2083 		return ENA_COM_TIMER_EXPIRED;
2084 	}
2085 
2086 	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
2087 		ena_trc_err(ena_dev, "Device isn't ready, abort com init\n");
2088 		return ENA_COM_NO_DEVICE;
2089 	}
2090 
2091 	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
2092 
2093 	admin_queue->bus = ena_dev->bus;
2094 	admin_queue->q_dmadev = ena_dev->dmadev;
2095 	admin_queue->polling = false;
2096 	admin_queue->curr_cmd_id = 0;
2097 
2098 	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
2099 
2100 	ENA_SPINLOCK_INIT(admin_queue->q_lock);
2101 
2102 	ret = ena_com_init_comp_ctxt(admin_queue);
2103 	if (unlikely(ret))
2104 		goto error;
2105 
2106 	ret = ena_com_admin_init_sq(admin_queue);
2107 	if (unlikely(ret))
2108 		goto error;
2109 
2110 	ret = ena_com_admin_init_cq(admin_queue);
2111 	if (unlikely(ret))
2112 		goto error;
2113 
2114 	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
2115 		ENA_REGS_AQ_DB_OFF);
2116 
2117 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
2118 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
2119 
2120 	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
2121 	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
2122 
2123 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
2124 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
2125 
2126 	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
2127 	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
2128 
2129 	aq_caps = 0;
2130 	aq_caps |= ENA_FIELD_PREP(admin_queue->q_depth,
2131 				  ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK,
2132 				  ENA_ZERO_SHIFT);
2133 	aq_caps |= ENA_FIELD_PREP(sizeof(struct ena_admin_aq_entry),
2134 				 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK,
2135 				 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT);
2136 
2137 	acq_caps = 0;
2138 	acq_caps |= ENA_FIELD_PREP(admin_queue->q_depth,
2139 				   ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK,
2140 				   ENA_ZERO_SHIFT);
2141 	acq_caps |= ENA_FIELD_PREP(sizeof(struct ena_admin_acq_entry),
2142 				   ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK,
2143 				   ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT);
2144 
2145 	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
2146 	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
2147 	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
2148 	if (unlikely(ret))
2149 		goto error;
2150 
2151 	admin_queue->ena_dev = ena_dev;
2152 	admin_queue->running_state = true;
2153 	admin_queue->is_missing_admin_interrupt = false;
2154 
2155 	return 0;
2156 error:
2157 	ena_com_admin_destroy(ena_dev);
2158 
2159 	return ret;
2160 }
2161 
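/* Note: the function below creates one IO queue pair. The CQ is created first
 * so its index can be passed to the SQ creation command; if SQ creation fails
 * the CQ is destroyed again and all host-side resources are freed.
 */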
2162 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
2163 			    struct ena_com_create_io_ctx *ctx)
2164 {
2165 	struct ena_com_io_sq *io_sq;
2166 	struct ena_com_io_cq *io_cq;
2167 	int ret;
2168 
2169 	if (unlikely(ctx->qid >= ENA_TOTAL_NUM_QUEUES)) {
2170 		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
2171 			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
2172 		return ENA_COM_INVAL;
2173 	}
2174 
2175 	io_sq = &ena_dev->io_sq_queues[ctx->qid];
2176 	io_cq = &ena_dev->io_cq_queues[ctx->qid];
2177 
2178 	memset(io_sq, 0x0, sizeof(*io_sq));
2179 	memset(io_cq, 0x0, sizeof(*io_cq));
2180 
2181 	/* Init CQ */
2182 	io_cq->q_depth = ctx->queue_size;
2183 	io_cq->direction = ctx->direction;
2184 	io_cq->qid = ctx->qid;
2185 
2186 	io_cq->msix_vector = ctx->msix_vector;
2187 
2188 	io_sq->q_depth = ctx->queue_size;
2189 	io_sq->direction = ctx->direction;
2190 	io_sq->qid = ctx->qid;
2191 
2192 	io_sq->mem_queue_type = ctx->mem_queue_type;
2193 
2194 	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
2195 		/* header length is limited to 8 bits */
2196 		io_sq->tx_max_header_size =
2197 			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
2198 
2199 	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
2200 	if (unlikely(ret))
2201 		goto error;
2202 	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
2203 	if (unlikely(ret))
2204 		goto error;
2205 
2206 	ret = ena_com_create_io_cq(ena_dev, io_cq);
2207 	if (unlikely(ret))
2208 		goto error;
2209 
2210 	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
2211 	if (unlikely(ret))
2212 		goto destroy_io_cq;
2213 
2214 	return 0;
2215 
2216 destroy_io_cq:
2217 	ena_com_destroy_io_cq(ena_dev, io_cq);
2218 error:
2219 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
2220 	return ret;
2221 }
2222 
2223 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
2224 {
2225 	struct ena_com_io_sq *io_sq;
2226 	struct ena_com_io_cq *io_cq;
2227 
2228 	if (unlikely(qid >= ENA_TOTAL_NUM_QUEUES)) {
2229 		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
2230 			    qid, ENA_TOTAL_NUM_QUEUES);
2231 		return;
2232 	}
2233 
2234 	io_sq = &ena_dev->io_sq_queues[qid];
2235 	io_cq = &ena_dev->io_cq_queues[qid];
2236 
2237 	ena_com_destroy_io_sq(ena_dev, io_sq);
2238 	ena_com_destroy_io_cq(ena_dev, io_cq);
2239 
2240 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
2241 }
2242 
2243 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
2244 			    struct ena_admin_get_feat_resp *resp)
2245 {
2246 	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
2247 }
2248 
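/* Note: small helper that issues an ENA_ADMIN_GET_STATS command of the
 * requested type and leaves the raw response in ctx->get_resp for the caller
 * to unpack.
 */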
2249 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2250 			     struct ena_com_stats_ctx *ctx,
2251 			     enum ena_admin_get_stats_type type)
2252 {
2253 	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2254 	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2255 	struct ena_com_admin_queue *admin_queue;
2256 	int ret;
2257 
2258 	admin_queue = &ena_dev->admin_queue;
2259 
2260 	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2261 	get_cmd->aq_common_descriptor.flags = 0;
2262 	get_cmd->type = type;
2263 
2264 	ret = ena_com_execute_admin_command(admin_queue,
2265 					    (struct ena_admin_aq_entry *)get_cmd,
2266 					    sizeof(*get_cmd),
2267 					    (struct ena_admin_acq_entry *)get_resp,
2268 					    sizeof(*get_resp));
2269 
2270 	if (unlikely(ret))
2271 		ena_trc_err(ena_dev, "Failed to get stats. error: %d\n", ret);
2272 
2273 	return ret;
2274 }
2275 
2276 static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev)
2277 {
2278 	struct ena_customer_metrics *customer_metrics;
2279 	struct ena_com_stats_ctx ctx;
2280 	int ret;
2281 
2282 	customer_metrics = &ena_dev->customer_metrics;
2283 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
2284 		customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
2285 		return;
2286 	}
2287 
2288 	memset(&ctx, 0x0, sizeof(ctx));
2289 	ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK;
2290 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
2291 	if (likely(ret == 0))
2292 		customer_metrics->supported_metrics =
2293 			ctx.get_resp.u.customer_metrics.reported_metrics;
2294 	else
2295 		ena_trc_err(ena_dev, "Failed to query customer metrics support. error: %d\n", ret);
2296 }
2297 
2298 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
2299 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
2300 {
2301 	struct ena_admin_get_feat_resp get_resp;
2302 	int rc;
2303 
2304 	rc = ena_com_get_feature(ena_dev, &get_resp,
2305 				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
2306 	if (rc)
2307 		return rc;
2308 
2309 	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
2310 	       sizeof(get_resp.u.dev_attr));
2311 
2312 	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
2313 	ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
2314 
2315 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2316 		rc = ena_com_get_feature(ena_dev, &get_resp,
2317 					 ENA_ADMIN_MAX_QUEUES_EXT,
2318 					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
2319 		if (rc)
2320 			return rc;
2321 
2322 		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
2323 			return ENA_COM_INVAL;
2324 
2325 		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
2326 		       sizeof(get_resp.u.max_queue_ext));
2327 		ena_dev->tx_max_header_size =
2328 			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
2329 	} else {
2330 		rc = ena_com_get_feature(ena_dev, &get_resp,
2331 					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
2332 		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
2333 		       sizeof(get_resp.u.max_queue));
2334 		ena_dev->tx_max_header_size =
2335 			get_resp.u.max_queue.max_header_size;
2336 
2337 		if (rc)
2338 			return rc;
2339 	}
2340 
2341 	rc = ena_com_get_feature(ena_dev, &get_resp,
2342 				 ENA_ADMIN_AENQ_CONFIG, 0);
2343 	if (rc)
2344 		return rc;
2345 
2346 	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
2347 	       sizeof(get_resp.u.aenq));
2348 
2349 	rc = ena_com_get_feature(ena_dev, &get_resp,
2350 				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2351 	if (rc)
2352 		return rc;
2353 
2354 	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2355 	       sizeof(get_resp.u.offload));
2356 
2357 	/* Driver hints isn't a mandatory admin command, so in case the
2358 	 * command isn't supported, set driver hints to 0
2359 	 */
2360 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2361 
2362 	if (!rc)
2363 		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2364 		       sizeof(get_resp.u.hw_hints));
2365 	else if (rc == ENA_COM_UNSUPPORTED)
2366 		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
2367 	else
2368 		return rc;
2369 
2370 	rc = ena_com_get_feature(ena_dev, &get_resp,
2371 				 ENA_ADMIN_LLQ, ENA_ADMIN_LLQ_FEATURE_VERSION_1);
2372 	if (!rc)
2373 		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2374 		       sizeof(get_resp.u.llq));
2375 	else if (rc == ENA_COM_UNSUPPORTED)
2376 		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2377 	else
2378 		return rc;
2379 
2380 	ena_com_set_supported_customer_metrics(ena_dev);
2381 
2382 	return 0;
2383 }
2384 
2385 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2386 {
2387 	ena_com_handle_admin_completion(&ena_dev->admin_queue);
2388 }
2389 
2390 /* ena_handle_specific_aenq_event:
2391  * return the handler that is relevant to the specific event group
2392  */
2393 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
2394 						     u16 group)
2395 {
2396 	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2397 
2398 	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2399 		return aenq_handlers->handlers[group];
2400 
2401 	return aenq_handlers->unimplemented_handler;
2402 }
2403 
2404 /* ena_aenq_intr_handler:
2405  * handles the aenq incoming events.
2406  * pop events from the queue and apply the specific handler
2407  */
2408 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
2409 {
2410 	struct ena_admin_aenq_entry *aenq_e;
2411 	struct ena_admin_aenq_common_desc *aenq_common;
2412 	struct ena_com_aenq *aenq  = &ena_dev->aenq;
2413 	ena_aenq_handler handler_cb;
2414 	u16 masked_head, processed = 0;
2415 	u8 phase;
2416 
2417 	masked_head = aenq->head & (aenq->q_depth - 1);
2418 	phase = aenq->phase;
2419 	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2420 	aenq_common = &aenq_e->aenq_common_desc;
2421 
2422 	/* Go over all the events */
2423 	while ((READ_ONCE8(aenq_common->flags) &
2424 		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2425 		/* When the phase bit of the AENQ descriptor aligns with the driver's phase bit,
2426 		 * it signifies the readiness of the entire AENQ descriptor.
2427 		 * The driver should proceed to read the descriptor's data only after confirming
2428 		 * and synchronizing the phase bit.
2429 		 * This memory fence guarantees the correct sequence of accesses to the
2430 		 * descriptor's memory.
2431 		 */
2432 		dma_rmb();
2433 
2434 		ena_trc_dbg(ena_dev, "AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
2435 			    aenq_common->group,
2436 			    aenq_common->syndrome,
2437 			    ((u64)aenq_common->timestamp_low |
2438 			    ((u64)aenq_common->timestamp_high << 32)));
2439 
2440 		/* Handle the specific event */
2441 		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
2442 							  aenq_common->group);
2443 		handler_cb(data, aenq_e); /* Call the actual event handler */
2444 
2445 		/* Get next event entry */
2446 		masked_head++;
2447 		processed++;
2448 
2449 		if (unlikely(masked_head == aenq->q_depth)) {
2450 			masked_head = 0;
2451 			phase = !phase;
2452 		}
2453 		aenq_e = &aenq->entries[masked_head];
2454 		aenq_common = &aenq_e->aenq_common_desc;
2455 	}
2456 
2457 	aenq->head += processed;
2458 	aenq->phase = phase;
2459 
2460 	/* Don't update aenq doorbell if there weren't any processed events */
2461 	if (!processed)
2462 		return;
2463 
2464 	/* write the aenq doorbell after all AENQ descriptors were read */
2465 	mb();
2466 	ENA_REG_WRITE32_RELAXED(ena_dev->bus, (u32)aenq->head,
2467 				ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2468 	mmiowb();
2469 }
2470 
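/* Note: the function below scans the not-yet-processed AENQ entries (the same
 * phase-bit walk as the interrupt handler) looking for a KEEP_ALIVE event,
 * but without advancing the head or writing the doorbell, so the entries are
 * still handled later by ena_com_aenq_intr_handler().
 */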
2471 bool ena_com_aenq_has_keep_alive(struct ena_com_dev *ena_dev)
2472 {
2473 	struct ena_admin_aenq_common_desc *aenq_common;
2474 	struct ena_com_aenq *aenq = &ena_dev->aenq;
2475 	struct ena_admin_aenq_entry *aenq_e;
2476 	u8 phase = aenq->phase;
2477 	u16 masked_head;
2478 
2479 	masked_head = aenq->head & (aenq->q_depth - 1);
2480 	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2481 	aenq_common = &aenq_e->aenq_common_desc;
2482 
2483 	/* Go over all the events */
2484 	while ((READ_ONCE8(aenq_common->flags) &
2485 		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2486 		/* When the phase bit of the AENQ descriptor aligns with the driver's phase bit,
2487 		 * it signifies the readiness of the entire AENQ descriptor.
2488 		 * The driver should proceed to read the descriptor's data only after confirming
2489 		 * and synchronizing the phase bit.
2490 		 * This memory fence guarantees the correct sequence of accesses to the
2491 		 * descriptor's memory.
2492 		 */
2493 		dma_rmb();
2494 
2495 		if (aenq_common->group == ENA_ADMIN_KEEP_ALIVE)
2496 			return true;
2497 
2498 		/* Get next event entry */
2499 		masked_head++;
2500 
2501 		if (unlikely(masked_head == aenq->q_depth)) {
2502 			masked_head = 0;
2503 			phase = !phase;
2504 		}
2505 
2506 		aenq_e = &aenq->entries[masked_head];
2507 		aenq_common = &aenq_e->aenq_common_desc;
2508 	}
2509 
2510 	return false;
2511 }
2512 
2513 
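/* Note on the reset sequence below:
 * 1. read CAPS to obtain the device-advertised reset timeout,
 * 2. write DEV_CTL with the reset bit plus the encoded reset reason (falling
 *    back to a generic reason if extended reset reasons are unsupported) and
 *    re-program the MMIO read response address,
 * 3. wait for RESET_IN_PROGRESS to assert, clear DEV_CTL, then wait for it to
 *    deassert,
 * 4. derive the admin command completion timeout from CAPS (100 ms
 *    resolution), or fall back to the driver default.
 */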
2514 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2515 		      enum ena_regs_reset_reason_types reset_reason)
2516 {
2517 	u32 reset_reason_msb, reset_reason_lsb;
2518 	u32 stat, timeout, cap, reset_val;
2519 	int rc;
2520 
2521 	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2522 	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2523 
2524 	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2525 		     (cap == ENA_MMIO_READ_TIMEOUT))) {
2526 		ena_trc_err(ena_dev, "Reg read32 timeout occurred\n");
2527 		return ENA_COM_TIMER_EXPIRED;
2528 	}
2529 
2530 	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2531 		ena_trc_err(ena_dev, "Device isn't ready, can't reset device\n");
2532 		return ENA_COM_INVAL;
2533 	}
2534 
2535 	timeout = ENA_FIELD_GET(cap,
2536 				ENA_REGS_CAPS_RESET_TIMEOUT_MASK,
2537 				ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT);
2538 	if (timeout == 0) {
2539 		ena_trc_err(ena_dev, "Invalid timeout value\n");
2540 		return ENA_COM_INVAL;
2541 	}
2542 
2543 	/* start reset */
2544 	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2545 
2546 	/* For backward compatibility, device will interpret
2547 	 * bits 24-27 as MSB, bits 28-31 as LSB
2548 	 */
2549 	reset_reason_lsb = ENA_FIELD_GET(reset_reason,
2550 					 ENA_RESET_REASON_LSB_MASK,
2551 					 ENA_RESET_REASON_LSB_OFFSET);
2552 
2553 	reset_reason_msb = ENA_FIELD_GET(reset_reason,
2554 					 ENA_RESET_REASON_MSB_MASK,
2555 					 ENA_RESET_REASON_MSB_OFFSET);
2556 
2557 	reset_val |= reset_reason_lsb << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT;
2558 
2559 	if (ena_com_get_cap(ena_dev, ENA_ADMIN_EXTENDED_RESET_REASONS))
2560 		reset_val |= reset_reason_msb << ENA_REGS_DEV_CTL_RESET_REASON_EXT_SHIFT;
2561 	else if (reset_reason_msb) {
2562 		/* In case the device does not support intended
2563 		 * extended reset reason fallback to generic
2564 		 */
2565 		reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2566 		reset_val |= ENA_FIELD_PREP(ENA_REGS_RESET_GENERIC,
2567 					    ENA_REGS_DEV_CTL_RESET_REASON_MASK,
2568 					    ENA_REGS_DEV_CTL_RESET_REASON_SHIFT);
2569 	}
2570 	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2571 
2572 	/* Write again the MMIO read request address */
2573 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2574 
2575 	rc = wait_for_reset_state(ena_dev, timeout,
2576 				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2577 	if (unlikely(rc)) {
2578 		ena_trc_err(ena_dev, "Reset indication didn't turn on\n");
2579 		return rc;
2580 	}
2581 
2582 	/* reset done */
2583 	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2584 	rc = wait_for_reset_state(ena_dev, timeout, 0);
2585 	if (unlikely(rc)) {
2586 		ena_trc_err(ena_dev, "Reset indication didn't turn off\n");
2587 		return rc;
2588 	}
2589 
2590 	timeout = ENA_FIELD_GET(cap,
2591 				ENA_REGS_CAPS_ADMIN_CMD_TO_MASK,
2592 				ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT);
2593 	if (timeout)
2594 		/* the resolution of timeout reg is 100ms */
2595 		ena_dev->admin_queue.completion_timeout = timeout * 100000;
2596 	else
2597 		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2598 
2599 	return 0;
2600 }
2601 
2602 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2603 			  struct ena_admin_eni_stats *stats)
2604 {
2605 	struct ena_com_stats_ctx ctx;
2606 	int ret;
2607 
2608 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
2609 		ena_trc_err(ena_dev, "Capability %d isn't supported\n", ENA_ADMIN_ENI_STATS);
2610 		return ENA_COM_UNSUPPORTED;
2611 	}
2612 
2613 	memset(&ctx, 0x0, sizeof(ctx));
2614 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2615 	if (likely(ret == 0))
2616 		memcpy(stats, &ctx.get_resp.u.eni_stats,
2617 		       sizeof(ctx.get_resp.u.eni_stats));
2618 
2619 	return ret;
2620 }
2621 
2622 int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
2623 			      struct ena_admin_ena_srd_info *info)
2624 {
2625 	struct ena_com_stats_ctx ctx;
2626 	int ret;
2627 
2628 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) {
2629 		ena_trc_err(ena_dev, "Capability %d isn't supported\n", ENA_ADMIN_ENA_SRD_INFO);
2630 		return ENA_COM_UNSUPPORTED;
2631 	}
2632 
2633 	memset(&ctx, 0x0, sizeof(ctx));
2634 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD);
2635 	if (likely(ret == 0))
2636 		memcpy(info, &ctx.get_resp.u.ena_srd_info,
2637 		       sizeof(ctx.get_resp.u.ena_srd_info));
2638 
2639 	return ret;
2640 }
2641 
2642 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2643 				struct ena_admin_basic_stats *stats)
2644 {
2645 	struct ena_com_stats_ctx ctx;
2646 	int ret;
2647 
2648 	memset(&ctx, 0x0, sizeof(ctx));
2649 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2650 	if (likely(ret == 0))
2651 		memcpy(stats, &ctx.get_resp.u.basic_stats,
2652 		       sizeof(ctx.get_resp.u.basic_stats));
2653 
2654 	return ret;
2655 }
2656 
2657 int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
2658 {
2659 	struct ena_admin_aq_get_stats_cmd *get_cmd;
2660 	struct ena_com_stats_ctx ctx;
2661 	int ret;
2662 
2663 	if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
2664 		ena_trc_err(ena_dev, "Invalid length %u, it exceeds the internal metrics buffer size.\n", len);
2665 		return ENA_COM_INVAL;
2666 	}
2667 
2668 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
2669 		ena_trc_err(ena_dev, "Capability %d not supported.\n", ENA_ADMIN_CUSTOMER_METRICS);
2670 		return ENA_COM_UNSUPPORTED;
2671 	}
2672 
2673 	if (!ena_dev->customer_metrics.supported_metrics) {
2674 		ena_trc_err(ena_dev, "No supported customer metrics.\n");
2675 		return ENA_COM_UNSUPPORTED;
2676 	}
2677 
2678 	get_cmd = &ctx.get_cmd;
2679 	memset(&ctx, 0x0, sizeof(ctx));
2680 	ret = ena_com_mem_addr_set(ena_dev,
2681 		&get_cmd->u.control_buffer.address,
2682 		ena_dev->customer_metrics.buffer_dma_addr);
2683 	if (unlikely(ret)) {
2684 		ena_trc_err(ena_dev, "Memory address set failed.\n");
2685 		return ret;
2686 	}
2687 
2688 	get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
2689 	get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
2690 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
2691 	if (likely(ret == 0))
2692 		memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
2693 	else
2694 		ena_trc_err(ena_dev, "Failed to get customer metrics. error: %d\n", ret);
2695 
2696 	return ret;
2697 }
2698 
2699 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
2700 {
2701 	struct ena_com_admin_queue *admin_queue;
2702 	struct ena_admin_set_feat_cmd cmd;
2703 	struct ena_admin_set_feat_resp resp;
2704 	int ret;
2705 
2706 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2707 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
2708 		return ENA_COM_UNSUPPORTED;
2709 	}
2710 
2711 	memset(&cmd, 0x0, sizeof(cmd));
2712 	admin_queue = &ena_dev->admin_queue;
2713 
2714 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2715 	cmd.aq_common_descriptor.flags = 0;
2716 	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2717 	cmd.u.mtu.mtu = mtu;
2718 
2719 	ret = ena_com_execute_admin_command(admin_queue,
2720 					    (struct ena_admin_aq_entry *)&cmd,
2721 					    sizeof(cmd),
2722 					    (struct ena_admin_acq_entry *)&resp,
2723 					    sizeof(resp));
2724 
2725 	if (unlikely(ret))
2726 		ena_trc_err(ena_dev, "Failed to set mtu %d. error: %d\n", mtu, ret);
2727 
2728 	return ret;
2729 }
2730 
2731 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2732 				 struct ena_admin_feature_offload_desc *offload)
2733 {
2734 	int ret;
2735 	struct ena_admin_get_feat_resp resp;
2736 
2737 	ret = ena_com_get_feature(ena_dev, &resp,
2738 				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2739 	if (unlikely(ret)) {
2740 		ena_trc_err(ena_dev, "Failed to get offload capabilities %d\n", ret);
2741 		return ret;
2742 	}
2743 
2744 	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2745 
2746 	return 0;
2747 }
2748 
2749 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2750 {
2751 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2752 	struct ena_rss *rss = &ena_dev->rss;
2753 	struct ena_admin_set_feat_cmd cmd;
2754 	struct ena_admin_set_feat_resp resp;
2755 	struct ena_admin_get_feat_resp get_resp;
2756 	int ret;
2757 
2758 	if (!ena_com_check_supported_feature_id(ena_dev,
2759 						ENA_ADMIN_RSS_HASH_FUNCTION)) {
2760 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
2761 			    ENA_ADMIN_RSS_HASH_FUNCTION);
2762 		return ENA_COM_UNSUPPORTED;
2763 	}
2764 
2765 	/* Validate hash function is supported */
2766 	ret = ena_com_get_feature(ena_dev, &get_resp,
2767 				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2768 	if (unlikely(ret))
2769 		return ret;
2770 
2771 	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2772 		ena_trc_err(ena_dev, "Func hash %d isn't supported by device, abort\n",
2773 			    rss->hash_func);
2774 		return ENA_COM_UNSUPPORTED;
2775 	}
2776 
2777 	memset(&cmd, 0x0, sizeof(cmd));
2778 
2779 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2780 	cmd.aq_common_descriptor.flags =
2781 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2782 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2783 	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2784 	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2785 
2786 	ret = ena_com_mem_addr_set(ena_dev,
2787 				   &cmd.control_buffer.address,
2788 				   rss->hash_key_dma_addr);
2789 	if (unlikely(ret)) {
2790 		ena_trc_err(ena_dev, "Memory address set failed\n");
2791 		return ret;
2792 	}
2793 
2794 	cmd.control_buffer.length = sizeof(*rss->hash_key);
2795 
2796 	ret = ena_com_execute_admin_command(admin_queue,
2797 					    (struct ena_admin_aq_entry *)&cmd,
2798 					    sizeof(cmd),
2799 					    (struct ena_admin_acq_entry *)&resp,
2800 					    sizeof(resp));
2801 	if (unlikely(ret)) {
2802 		ena_trc_err(ena_dev, "Failed to set hash function %d. error: %d\n",
2803 			    rss->hash_func, ret);
2804 		return ENA_COM_INVAL;
2805 	}
2806 
2807 	return 0;
2808 }
2809 
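/* Note: the function below validates that key_len is DWORD aligned and that
 * the requested hash function is advertised by the device, optionally installs
 * a new Toeplitz key, then pushes the configuration with
 * ena_com_set_hash_function(); on failure the previously selected function is
 * restored.
 */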
2810 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2811 			       enum ena_admin_hash_functions func,
2812 			       const u8 *key, u16 key_len, u32 init_val)
2813 {
2814 	struct ena_admin_feature_rss_flow_hash_control *hash_key;
2815 	struct ena_admin_get_feat_resp get_resp;
2816 	enum ena_admin_hash_functions old_func;
2817 	struct ena_rss *rss = &ena_dev->rss;
2818 	int rc;
2819 
2820 	hash_key = rss->hash_key;
2821 
2822 	/* Make sure the key size is a multiple of DWORDs */
2823 	if (unlikely(key_len & 0x3))
2824 		return ENA_COM_INVAL;
2825 
2826 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2827 				    ENA_ADMIN_RSS_HASH_FUNCTION,
2828 				    rss->hash_key_dma_addr,
2829 				    sizeof(*rss->hash_key), 0);
2830 	if (unlikely(rc))
2831 		return rc;
2832 
2833 	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2834 		ena_trc_err(ena_dev, "Flow hash function %d isn't supported\n", func);
2835 		return ENA_COM_UNSUPPORTED;
2836 	}
2837 
2838 	if (func == ENA_ADMIN_TOEPLITZ && key) {
2839 		if (key_len != sizeof(hash_key->key)) {
2840 			ena_trc_err(ena_dev, "key len (%u) doesn't equal the supported size (%zu)\n",
2841 				    key_len, sizeof(hash_key->key));
2842 			return ENA_COM_INVAL;
2843 		}
2844 		memcpy(hash_key->key, key, key_len);
2845 		hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
2846 	}
2847 
2848 	rss->hash_init_val = init_val;
2849 	old_func = rss->hash_func;
2850 	rss->hash_func = func;
2851 	rc = ena_com_set_hash_function(ena_dev);
2852 
2853 	/* Restore the old function */
2854 	if (unlikely(rc))
2855 		rss->hash_func = old_func;
2856 
2857 	return rc;
2858 }
2859 
2860 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2861 			      enum ena_admin_hash_functions *func)
2862 {
2863 	struct ena_rss *rss = &ena_dev->rss;
2864 	struct ena_admin_get_feat_resp get_resp;
2865 	int rc;
2866 
2867 	if (unlikely(!func))
2868 		return ENA_COM_INVAL;
2869 
2870 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2871 				    ENA_ADMIN_RSS_HASH_FUNCTION,
2872 				    rss->hash_key_dma_addr,
2873 				    sizeof(*rss->hash_key), 0);
2874 	if (unlikely(rc))
2875 		return rc;
2876 
2877 	/* ENA_FFS() returns 1 in case the lsb is set */
2878 	rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
2879 	if (rss->hash_func)
2880 		rss->hash_func--;
2881 
2882 	*func = rss->hash_func;
2883 
2884 	return 0;
2885 }
2886 
2887 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2888 {
2889 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
2890 		ena_dev->rss.hash_key;
2891 
2892 	if (key)
2893 		memcpy(key, hash_key->key,
2894 		       (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
2895 
2896 	return 0;
2897 }
2898 
2899 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2900 			  enum ena_admin_flow_hash_proto proto,
2901 			  u16 *fields)
2902 {
2903 	struct ena_rss *rss = &ena_dev->rss;
2904 	struct ena_admin_get_feat_resp get_resp;
2905 	int rc;
2906 
2907 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2908 				    ENA_ADMIN_RSS_HASH_INPUT,
2909 				    rss->hash_ctrl_dma_addr,
2910 				    sizeof(*rss->hash_ctrl), 0);
2911 	if (unlikely(rc))
2912 		return rc;
2913 
2914 	if (fields)
2915 		*fields = rss->hash_ctrl->selected_fields[proto].fields;
2916 
2917 	return 0;
2918 }
2919 
2920 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2921 {
2922 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2923 	struct ena_rss *rss = &ena_dev->rss;
2924 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2925 	struct ena_admin_set_feat_cmd cmd;
2926 	struct ena_admin_set_feat_resp resp;
2927 	int ret;
2928 
2929 	if (!ena_com_check_supported_feature_id(ena_dev,
2930 						ENA_ADMIN_RSS_HASH_INPUT)) {
2931 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
2932 			    ENA_ADMIN_RSS_HASH_INPUT);
2933 		return ENA_COM_UNSUPPORTED;
2934 	}
2935 
2936 	memset(&cmd, 0x0, sizeof(cmd));
2937 
2938 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2939 	cmd.aq_common_descriptor.flags =
2940 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2941 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2942 	cmd.u.flow_hash_input.enabled_input_sort =
2943 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2944 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2945 
2946 	ret = ena_com_mem_addr_set(ena_dev,
2947 				   &cmd.control_buffer.address,
2948 				   rss->hash_ctrl_dma_addr);
2949 	if (unlikely(ret)) {
2950 		ena_trc_err(ena_dev, "Memory address set failed\n");
2951 		return ret;
2952 	}
2953 	cmd.control_buffer.length = sizeof(*hash_ctrl);
2954 
2955 	ret = ena_com_execute_admin_command(admin_queue,
2956 					    (struct ena_admin_aq_entry *)&cmd,
2957 					    sizeof(cmd),
2958 					    (struct ena_admin_acq_entry *)&resp,
2959 					    sizeof(resp));
2960 	if (unlikely(ret))
2961 		ena_trc_err(ena_dev, "Failed to set hash input. error: %d\n", ret);
2962 
2963 	return ret;
2964 }
2965 
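/* Note: the function below fills the host hash-control table with a default
 * field selection (L3 src/dst plus L4 src/dst ports for TCP/UDP, L3 only for
 * plain and fragmented IP, L2 addresses for non-IP) and rejects the
 * configuration if the device does not support every selected field for some
 * protocol.
 */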
2966 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2967 {
2968 	struct ena_rss *rss = &ena_dev->rss;
2969 	struct ena_admin_feature_rss_hash_control *hash_ctrl =
2970 		rss->hash_ctrl;
2971 	u16 available_fields = 0;
2972 	int rc, i;
2973 
2974 	/* Get the supported hash input */
2975 	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2976 	if (unlikely(rc))
2977 		return rc;
2978 
2979 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2980 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2981 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2982 
2983 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2984 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2985 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2986 
2987 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2988 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2989 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2990 
2991 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2992 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2993 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2994 
2995 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2996 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2997 
2998 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2999 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
3000 
3001 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
3002 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
3003 
3004 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
3005 		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
3006 
3007 	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
3008 		available_fields = hash_ctrl->selected_fields[i].fields &
3009 				hash_ctrl->supported_fields[i].fields;
3010 		if (available_fields != hash_ctrl->selected_fields[i].fields) {
3011 			ena_trc_err(ena_dev, "Hash control doesn't support all of the desired configuration. proto %x supported %x selected %x\n",
3012 				    i, hash_ctrl->supported_fields[i].fields,
3013 				    hash_ctrl->selected_fields[i].fields);
3014 			return ENA_COM_UNSUPPORTED;
3015 		}
3016 	}
3017 
3018 	rc = ena_com_set_hash_ctrl(ena_dev);
3019 
3020 	/* In case of failure, restore the old hash ctrl */
3021 	if (unlikely(rc))
3022 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
3023 
3024 	return rc;
3025 }
3026 
3027 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
3028 			   enum ena_admin_flow_hash_proto proto,
3029 			   u16 hash_fields)
3030 {
3031 	struct ena_rss *rss = &ena_dev->rss;
3032 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
3033 	u16 supported_fields;
3034 	int rc;
3035 
3036 	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
3037 		ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto);
3038 		return ENA_COM_INVAL;
3039 	}
3040 
3041 	/* Get the ctrl table */
3042 	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
3043 	if (unlikely(rc))
3044 		return rc;
3045 
3046 	/* Make sure all the fields are supported */
3047 	supported_fields = hash_ctrl->supported_fields[proto].fields;
3048 	if ((hash_fields & supported_fields) != hash_fields) {
3049 		ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n",
3050 			    proto, hash_fields, supported_fields);
3051 	}
3052 
3053 	hash_ctrl->selected_fields[proto].fields = hash_fields;
3054 
3055 	rc = ena_com_set_hash_ctrl(ena_dev);
3056 
3057 	/* In case of failure, restore the old hash ctrl */
3058 	if (unlikely(rc))
3059 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
3060 
3061 	return rc;
3062 }
3063 
3064 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
3065 				      u16 entry_idx, u16 entry_value)
3066 {
3067 	struct ena_rss *rss = &ena_dev->rss;
3068 
3069 	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
3070 		return ENA_COM_INVAL;
3071 
3072 	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
3073 		return ENA_COM_INVAL;
3074 
3075 	rss->host_rss_ind_tbl[entry_idx] = entry_value;
3076 
3077 	return 0;
3078 }
3079 
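/* Note: the function below converts the host indirection table to the device
 * representation and pushes it with a SET_FEATURE command whose payload is
 * passed indirectly through the control buffer (the table holds
 * 1 << tbl_log_size entries).
 */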
3080 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
3081 {
3082 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
3083 	struct ena_rss *rss = &ena_dev->rss;
3084 	struct ena_admin_set_feat_cmd cmd;
3085 	struct ena_admin_set_feat_resp resp;
3086 	int ret;
3087 
3088 	if (!ena_com_check_supported_feature_id(ena_dev,
3089 						ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
3090 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
3091 			    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
3092 		return ENA_COM_UNSUPPORTED;
3093 	}
3094 
3095 	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
3096 	if (ret) {
3097 		ena_trc_err(ena_dev, "Failed to convert host indirection table to device table\n");
3098 		return ret;
3099 	}
3100 
3101 	memset(&cmd, 0x0, sizeof(cmd));
3102 
3103 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
3104 	cmd.aq_common_descriptor.flags =
3105 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
3106 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
3107 	cmd.u.ind_table.size = rss->tbl_log_size;
3108 	cmd.u.ind_table.inline_index = 0xFFFFFFFF;
3109 
3110 	ret = ena_com_mem_addr_set(ena_dev,
3111 				   &cmd.control_buffer.address,
3112 				   rss->rss_ind_tbl_dma_addr);
3113 	if (unlikely(ret)) {
3114 		ena_trc_err(ena_dev, "Memory address set failed\n");
3115 		return ret;
3116 	}
3117 
3118 	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
3119 		sizeof(struct ena_admin_rss_ind_table_entry);
3120 
3121 	ret = ena_com_execute_admin_command(admin_queue,
3122 					    (struct ena_admin_aq_entry *)&cmd,
3123 					    sizeof(cmd),
3124 					    (struct ena_admin_acq_entry *)&resp,
3125 					    sizeof(resp));
3126 
3127 	if (unlikely(ret))
3128 		ena_trc_err(ena_dev, "Failed to set indirect table. error: %d\n", ret);
3129 
3130 	return ret;
3131 }
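
/*
 * Illustrative sketch (not part of the driver): filling the host indirection
 * table round-robin over the active Rx queues and pushing it to the device.
 * "tbl_size" must equal 1 << indr_tbl_log_size passed to ena_com_rss_init();
 * the helper name and "nb_rx_queues" are hypothetical.
 *
 *	static int example_spread_ind_tbl(struct ena_com_dev *ena_dev,
 *					  u16 tbl_size, u16 nb_rx_queues)
 *	{
 *		u16 i;
 *		int rc;
 *
 *		for (i = 0; i < tbl_size; i++) {
 *			rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *							       i % nb_rx_queues);
 *			if (rc)
 *				return rc;
 *		}
 *
 *		return ena_com_indirect_table_set(ena_dev);
 *	}
 */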
3132 
3133 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
3134 {
3135 	struct ena_rss *rss = &ena_dev->rss;
3136 	struct ena_admin_get_feat_resp get_resp;
3137 	u32 tbl_size;
3138 	int i, rc;
3139 
3140 	tbl_size = (1ULL << rss->tbl_log_size) *
3141 		sizeof(struct ena_admin_rss_ind_table_entry);
3142 
3143 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
3144 				    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
3145 				    rss->rss_ind_tbl_dma_addr,
3146 				    tbl_size, 0);
3147 	if (unlikely(rc))
3148 		return rc;
3149 
3150 	if (!ind_tbl)
3151 		return 0;
3152 
3153 	for (i = 0; i < (1 << rss->tbl_log_size); i++)
3154 		ind_tbl[i] = rss->host_rss_ind_tbl[i];
3155 
3156 	return 0;
3157 }
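
/*
 * Illustrative sketch (not part of the driver): reading the table back. The
 * caller must provide room for 1 << indr_tbl_log_size entries; EXAMPLE_LOG_SIZE
 * below is a hypothetical placeholder for that value.
 *
 *	u32 ind_tbl[1 << EXAMPLE_LOG_SIZE];
 *
 *	rc = ena_com_indirect_table_get(ena_dev, ind_tbl);
 */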
3158 
3159 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
3160 {
3161 	int rc;
3162 
3163 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
3164 
3165 	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
3166 	if (unlikely(rc))
3167 		goto err_indr_tbl;
3168 
3169 	/* The following call may return ENA_COM_UNSUPPORTED when the device
3170 	 * doesn't support setting the hash key / hash function. That error can
3171 	 * be safely ignored, leaving the driver with indirection-table support only.
3172 	 */
3173 	rc = ena_com_hash_key_allocate(ena_dev);
3174 	if (likely(!rc))
3175 		ena_com_hash_key_fill_default_key(ena_dev);
3176 	else if (rc != ENA_COM_UNSUPPORTED)
3177 		goto err_hash_key;
3178 
3179 	rc = ena_com_hash_ctrl_init(ena_dev);
3180 	if (unlikely(rc))
3181 		goto err_hash_ctrl;
3182 
3183 	return 0;
3184 
3185 err_hash_ctrl:
3186 	ena_com_hash_key_destroy(ena_dev);
3187 err_hash_key:
3188 	ena_com_indirect_table_destroy(ena_dev);
3189 err_indr_tbl:
3190 
3191 	return rc;
3192 }
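
/*
 * Illustrative sketch (not part of the driver): typical RSS bring-up order.
 * The log size of 7 (a 128-entry table), "nb_rx_queues", and the
 * example_spread_ind_tbl() helper sketched above are hypothetical.
 *
 *	rc = ena_com_rss_init(ena_dev, 7);
 *	if (rc)
 *		return rc;
 *	rc = example_spread_ind_tbl(ena_dev, 1 << 7, nb_rx_queues);
 *	if (rc)
 *		ena_com_rss_destroy(ena_dev);
 *	return rc;
 */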
3193 
3194 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
3195 {
3196 	ena_com_indirect_table_destroy(ena_dev);
3197 	ena_com_hash_key_destroy(ena_dev);
3198 	ena_com_hash_ctrl_destroy(ena_dev);
3199 
3200 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
3201 }
3202 
3203 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
3204 {
3205 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3206 
3207 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
3208 			       SZ_4K,
3209 			       host_attr->host_info,
3210 			       host_attr->host_info_dma_addr,
3211 			       host_attr->host_info_dma_handle);
3212 	if (unlikely(!host_attr->host_info))
3213 		return ENA_COM_NO_MEM;
3214 
3215 	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
3216 		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
3217 		(ENA_COMMON_SPEC_VERSION_MINOR));
3218 
3219 	return 0;
3220 }
3221 
3222 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
3223 				u32 debug_area_size)
3224 {
3225 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3226 
3227 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
3228 			       debug_area_size,
3229 			       host_attr->debug_area_virt_addr,
3230 			       host_attr->debug_area_dma_addr,
3231 			       host_attr->debug_area_dma_handle);
3232 	if (unlikely(!host_attr->debug_area_virt_addr)) {
3233 		host_attr->debug_area_size = 0;
3234 		return ENA_COM_NO_MEM;
3235 	}
3236 
3237 	host_attr->debug_area_size = debug_area_size;
3238 
3239 	return 0;
3240 }
3241 
3242 int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
3243 {
3244 	struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
3245 
3246 	customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
3247 	customer_metrics->buffer_virt_addr = NULL;
3248 
3249 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
3250 			       customer_metrics->buffer_len,
3251 			       customer_metrics->buffer_virt_addr,
3252 			       customer_metrics->buffer_dma_addr,
3253 			       customer_metrics->buffer_dma_handle);
3254 	if (unlikely(!customer_metrics->buffer_virt_addr)) {
3255 		customer_metrics->buffer_len = 0;
3256 		return ENA_COM_NO_MEM;
3257 	}
3258 
3259 	return 0;
3260 }
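
/*
 * Each of the three allocations above (host info, debug area, customer
 * metrics) is paired with a delete helper below. The delete helpers check the
 * virtual address (or buffer pointer) for NULL first, so they are safe to call
 * even when the corresponding allocation was never made or already released.
 */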
3261 
3262 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
3263 {
3264 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3265 
3266 	if (host_attr->host_info) {
3267 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
3268 				      SZ_4K,
3269 				      host_attr->host_info,
3270 				      host_attr->host_info_dma_addr,
3271 				      host_attr->host_info_dma_handle);
3272 		host_attr->host_info = NULL;
3273 	}
3274 }
3275 
3276 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
3277 {
3278 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3279 
3280 	if (host_attr->debug_area_virt_addr) {
3281 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
3282 				      host_attr->debug_area_size,
3283 				      host_attr->debug_area_virt_addr,
3284 				      host_attr->debug_area_dma_addr,
3285 				      host_attr->debug_area_dma_handle);
3286 		host_attr->debug_area_virt_addr = NULL;
3287 	}
3288 }
3289 
3290 void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev)
3291 {
3292 	struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
3293 
3294 	if (customer_metrics->buffer_virt_addr) {
3295 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
3296 				      customer_metrics->buffer_len,
3297 				      customer_metrics->buffer_virt_addr,
3298 				      customer_metrics->buffer_dma_addr,
3299 				      customer_metrics->buffer_dma_handle);
3300 		customer_metrics->buffer_virt_addr = NULL;
3301 		customer_metrics->buffer_len = 0;
3302 	}
3303 }
3304 
3305 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
3306 {
3307 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3308 	struct ena_com_admin_queue *admin_queue;
3309 	struct ena_admin_set_feat_cmd cmd;
3310 	struct ena_admin_set_feat_resp resp;
3311 
3312 	int ret;
3313 
3314 	/* Host attribute config is called before ena_com_get_dev_attr_feat
3315 	 * so ena_com can't check if the feature is supported.
3316 	 */
3317 
3318 	memset(&cmd, 0x0, sizeof(cmd));
3319 	admin_queue = &ena_dev->admin_queue;
3320 
3321 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
3322 	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
3323 
3324 	ret = ena_com_mem_addr_set(ena_dev,
3325 				   &cmd.u.host_attr.debug_ba,
3326 				   host_attr->debug_area_dma_addr);
3327 	if (unlikely(ret)) {
3328 		ena_trc_err(ena_dev, "Memory address set failed\n");
3329 		return ret;
3330 	}
3331 
3332 	ret = ena_com_mem_addr_set(ena_dev,
3333 				   &cmd.u.host_attr.os_info_ba,
3334 				   host_attr->host_info_dma_addr);
3335 	if (unlikely(ret)) {
3336 		ena_trc_err(ena_dev, "Memory address set failed\n");
3337 		return ret;
3338 	}
3339 
3340 	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
3341 
3342 	ret = ena_com_execute_admin_command(admin_queue,
3343 					    (struct ena_admin_aq_entry *)&cmd,
3344 					    sizeof(cmd),
3345 					    (struct ena_admin_acq_entry *)&resp,
3346 					    sizeof(resp));
3347 
3348 	if (unlikely(ret))
3349 		ena_trc_err(ena_dev, "Failed to set host attributes: %d\n", ret);
3350 
3351 	return ret;
3352 }
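
/*
 * Illustrative sketch (not part of the driver): pushing host attributes after
 * allocating the backing buffers and after the driver has filled
 * host_attr->host_info with its identification data (omitted here). The debug
 * area size and the error labels are hypothetical.
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	if (rc)
 *		goto err;
 *	rc = ena_com_allocate_debug_area(ena_dev, 4096);
 *	if (rc)
 *		goto err_free_host_info;
 *	rc = ena_com_set_host_attributes(ena_dev);
 *	if (rc)
 *		goto err_free_debug_area;
 */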
3353 
3354 /* Interrupt moderation */
3355 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
3356 {
3357 	return ena_com_check_supported_feature_id(ena_dev,
3358 						  ENA_ADMIN_INTERRUPT_MODERATION);
3359 }
3360 
3361 static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
3362 							  u32 coalesce_usecs,
3363 							  u32 intr_delay_resolution,
3364 							  u32 *intr_moder_interval)
3365 {
3366 	if (!intr_delay_resolution) {
3367 		ena_trc_err(ena_dev, "Illegal interrupt delay granularity value\n");
3368 		return ENA_COM_FAULT;
3369 	}
3370 
3371 	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
3372 
3373 	return 0;
3374 }
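
/*
 * Example of the conversion above: if the device reports an interrupt delay
 * resolution of 4 us, a requested coalescing time of 64 us is programmed as
 * an interval of 64 / 4 = 16 device units (the numbers are illustrative).
 */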
3375 
3376 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
3377 						      u32 tx_coalesce_usecs)
3378 {
3379 	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
3380 							      tx_coalesce_usecs,
3381 							      ena_dev->intr_delay_resolution,
3382 							      &ena_dev->intr_moder_tx_interval);
3383 }
3384 
3385 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
3386 						      u32 rx_coalesce_usecs)
3387 {
3388 	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
3389 							      rx_coalesce_usecs,
3390 							      ena_dev->intr_delay_resolution,
3391 							      &ena_dev->intr_moder_rx_interval);
3392 }
3393 
3394 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
3395 {
3396 	struct ena_admin_get_feat_resp get_resp;
3397 	u16 delay_resolution;
3398 	int rc;
3399 
3400 	rc = ena_com_get_feature(ena_dev, &get_resp,
3401 				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
3402 
3403 	if (rc) {
3404 		if (rc == ENA_COM_UNSUPPORTED) {
3405 			ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
3406 				    ENA_ADMIN_INTERRUPT_MODERATION);
3407 			rc = 0;
3408 		} else {
3409 			ena_trc_err(ena_dev,
3410 				    "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
3411 		}
3412 
3413 		/* no moderation supported, disable adaptive support */
3414 		ena_com_disable_adaptive_moderation(ena_dev);
3415 		return rc;
3416 	}
3417 
3418 	/* The device supports interrupt moderation; store its delay resolution */
3419 	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
3420 	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
3421 
3422 	/* Disable adaptive moderation by default - can be enabled later */
3423 	ena_com_disable_adaptive_moderation(ena_dev);
3424 
3425 	return 0;
3426 }
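
/*
 * Illustrative sketch (not part of the driver): enabling non-adaptive
 * moderation after init. The 64 us / 0 us values are example numbers only.
 *
 *	rc = ena_com_init_interrupt_moderation(ena_dev);
 *	if (rc)
 *		goto err;
 *	if (ena_com_interrupt_moderation_supported(ena_dev)) {
 *		rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, 64);
 *		if (!rc)
 *			rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 0);
 *	}
 */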
3427 
3428 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
3429 {
3430 	return ena_dev->intr_moder_tx_interval;
3431 }
3432 
3433 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
3434 {
3435 	return ena_dev->intr_moder_rx_interval;
3436 }
3437 
3438 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
3439 			    struct ena_admin_feature_llq_desc *llq_features,
3440 			    struct ena_llq_configurations *llq_default_cfg)
3441 {
3442 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
3443 	int rc;
3444 
3445 	if (!llq_features->max_llq_num) {
3446 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3447 		return 0;
3448 	}
3449 
3450 	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
3451 	if (unlikely(rc))
3452 		return rc;
3453 
3454 	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
3455 		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
3456 
3457 	if (unlikely(ena_dev->tx_max_header_size == 0)) {
3458 		ena_trc_err(ena_dev, "The size of the LLQ entry is smaller than needed\n");
3459 		return ENA_COM_INVAL;
3460 	}
3461 
3462 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
3463 
3464 	return 0;
3465 }
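
/*
 * Illustrative sketch (not part of the driver): choosing the Tx placement
 * policy during device init. "get_feat_ctx" (holding the LLQ feature
 * descriptor returned by the device), "llq_cfg" (the driver's preferred LLQ
 * configuration), and "use_llq" are hypothetical caller-side names.
 *
 *	rc = ena_com_config_dev_mode(ena_dev, &get_feat_ctx.llq, &llq_cfg);
 *	if (rc)
 *		goto err;
 *	use_llq = (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV);
 */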
3466