Lines Matching +full:cmd +full:- +full:timeout +full:- +full:ms

1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
39 /* Timeout in microseconds */
94 /* Abort - canceled by the driver */
118 if (unlikely((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr)) {
123 ena_addr->mem_addr_low = lower_32_bits(addr);
124 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
131 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
132 struct ena_com_admin_sq *sq = &admin_queue->sq;
133 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
135 ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,
136 sq->mem_handle);
138 if (unlikely(!sq->entries)) {
143 sq->head = 0;
144 sq->tail = 0;
145 sq->phase = 1;
147 sq->db_addr = NULL;
154 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
155 struct ena_com_admin_cq *cq = &admin_queue->cq;
156 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
158 ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr,
159 cq->mem_handle);
161 if (unlikely(!cq->entries)) {
166 cq->head = 0;
167 cq->phase = 1;
175 struct ena_com_aenq *aenq = &ena_dev->aenq;
179 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
181 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size,
182 aenq->entries,
183 aenq->dma_addr,
184 aenq->mem_handle);
186 if (unlikely(!aenq->entries)) {
191 aenq->head = aenq->q_depth;
192 aenq->phase = 1;
194 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
195 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
197 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
198 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
201 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
205 ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
212 aenq->aenq_handlers = aenq_handlers;
220 comp_ctx->user_cqe = NULL;
221 comp_ctx->occupied = false;
222 ATOMIC32_DEC(&queue->outstanding_cmds);
228 if (unlikely(command_id >= admin_queue->q_depth)) {
229 ena_trc_err(admin_queue->ena_dev,
231 command_id, admin_queue->q_depth);
235 if (unlikely(!admin_queue->comp_ctx)) {
236 ena_trc_err(admin_queue->ena_dev,
241 if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
242 ena_trc_err(admin_queue->ena_dev,
248 ATOMIC32_INC(&admin_queue->outstanding_cmds);
249 admin_queue->comp_ctx[command_id].occupied = true;
252 return &admin_queue->comp_ctx[command_id];
256 struct ena_admin_aq_entry *cmd,
266 queue_size_mask = admin_queue->q_depth - 1;
268 tail_masked = admin_queue->sq.tail & queue_size_mask;
271 cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
272 if (unlikely(cnt >= admin_queue->q_depth)) {
273 ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
274 admin_queue->stats.out_of_space++;
278 cmd_id = admin_queue->curr_cmd_id;
280 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
283 cmd->aq_common_descriptor.command_id |= cmd_id &
290 comp_ctx->status = ENA_CMD_SUBMITTED;
291 comp_ctx->comp_size = (u32)comp_size_in_bytes;
292 comp_ctx->user_cqe = comp;
293 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
295 ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
297 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
299 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
302 admin_queue->sq.tail++;
303 admin_queue->stats.submitted_cmd++;
305 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
306 admin_queue->sq.phase = !admin_queue->sq.phase;
308 ENA_DB_SYNC(&admin_queue->sq.mem_handle);
309 ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
310 admin_queue->sq.db_addr);
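The fragments above show the submission side of the admin queue: the command id and phase bit are folded into the descriptor, the entry is copied into the ring at the masked tail, the tail is advanced, the phase flips when the tail wraps, and the new tail is finally written to the doorbell. A minimal, self-contained sketch of that ring bookkeeping follows; the struct and function names are illustrative rather than the driver's types, and the doorbell write is left to the caller.

    #include <stdint.h>
    #include <string.h>

    /* Illustrative submission-ring bookkeeping: q_depth is a power of two,
     * the tail is masked into the ring, and the phase bit flips on wrap. */
    struct ring {
            void     *entries;     /* q_depth * entry_size bytes */
            size_t    entry_size;
            uint16_t  q_depth;     /* power of two */
            uint16_t  tail;        /* free-running producer index */
            uint8_t   phase;
    };

    static void ring_push(struct ring *r, const void *entry)
    {
            uint16_t masked = r->tail & (r->q_depth - 1);

            memcpy((uint8_t *)r->entries + (size_t)masked * r->entry_size,
                   entry, r->entry_size);

            r->tail++;
            if ((r->tail & (r->q_depth - 1)) == 0)
                    r->phase = !r->phase;     /* wrapped around: flip phase */

            /* the caller would now write r->tail to the doorbell register */
    }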
317 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
318 size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
322 admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size);
323 if (unlikely(!admin_queue->comp_ctx)) {
328 for (i = 0; i < admin_queue->q_depth; i++) {
331 ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
338 struct ena_admin_aq_entry *cmd,
346 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
347 if (unlikely(!admin_queue->running_state)) {
348 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
351 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
356 admin_queue->running_state = false;
357 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
369 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
371 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
372 io_sq->desc_entry_size =
373 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
377 size = io_sq->desc_entry_size * io_sq->q_depth;
378 io_sq->bus = ena_dev->bus;
380 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
381 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
383 io_sq->desc_addr.virt_addr,
384 io_sq->desc_addr.phys_addr,
385 io_sq->desc_addr.mem_handle,
386 ctx->numa_node,
388 if (!io_sq->desc_addr.virt_addr) {
389 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
391 io_sq->desc_addr.virt_addr,
392 io_sq->desc_addr.phys_addr,
393 io_sq->desc_addr.mem_handle);
396 if (unlikely(!io_sq->desc_addr.virt_addr)) {
402 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
404 io_sq->bounce_buf_ctrl.buffer_size =
405 ena_dev->llq_info.desc_list_entry_size;
406 io_sq->bounce_buf_ctrl.buffers_num =
408 io_sq->bounce_buf_ctrl.next_to_use = 0;
410 size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
411 io_sq->bounce_buf_ctrl.buffers_num;
413 ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
415 io_sq->bounce_buf_ctrl.base_buffer,
416 ctx->numa_node,
418 if (!io_sq->bounce_buf_ctrl.base_buffer)
419 io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
421 if (unlikely(!io_sq->bounce_buf_ctrl.base_buffer)) {
426 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
427 sizeof(io_sq->llq_info));
430 io_sq->llq_buf_ctrl.curr_bounce_buf =
431 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
432 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
433 0x0, io_sq->llq_info.desc_list_entry_size);
434 io_sq->llq_buf_ctrl.descs_left_in_line =
435 io_sq->llq_info.descs_num_before_header;
436 io_sq->disable_meta_caching =
437 io_sq->llq_info.disable_meta_caching;
439 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
440 io_sq->entries_in_tx_burst_left =
441 io_sq->llq_info.max_entries_in_tx_burst;
444 io_sq->tail = 0;
445 io_sq->next_to_comp = 0;
446 io_sq->phase = 1;
458 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
461 io_cq->cdesc_entry_size_in_bytes =
462 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
466 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
467 io_cq->bus = ena_dev->bus;
469 ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
471 io_cq->cdesc_addr.virt_addr,
472 io_cq->cdesc_addr.phys_addr,
473 io_cq->cdesc_addr.mem_handle,
474 ctx->numa_node,
477 if (!io_cq->cdesc_addr.virt_addr) {
478 ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
480 io_cq->cdesc_addr.virt_addr,
481 io_cq->cdesc_addr.phys_addr,
482 io_cq->cdesc_addr.mem_handle,
486 if (unlikely(!io_cq->cdesc_addr.virt_addr)) {
491 io_cq->phase = 1;
492 io_cq->head = 0;
503 cmd_id = cqe->acq_common_descriptor.command &
508 ena_trc_err(admin_queue->ena_dev,
510 admin_queue->running_state = false;
514 if (!comp_ctx->occupied)
517 comp_ctx->status = ENA_CMD_COMPLETED;
518 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
520 if (comp_ctx->user_cqe)
521 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
523 if (!admin_queue->polling)
524 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
534 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
535 phase = admin_queue->cq.phase;
537 cqe = &admin_queue->cq.entries[head_masked];
540 while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
550 if (unlikely(head_masked == admin_queue->q_depth)) {
555 cqe = &admin_queue->cq.entries[head_masked];
558 admin_queue->cq.head += comp_num;
559 admin_queue->cq.phase = phase;
560 admin_queue->sq.head += comp_num;
561 admin_queue->stats.completed_cmd += comp_num;
568 ena_trc_err(admin_queue->ena_dev,
602 ena_time_t timeout;
606 timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
609 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
611 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
613 if (comp_ctx->status != ENA_CMD_SUBMITTED)
616 if (unlikely(ENA_TIME_EXPIRE(timeout))) {
617 ena_trc_err(admin_queue->ena_dev,
618 "Wait for completion (polling) timeout\n");
620 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
621 admin_queue->stats.no_completion++;
622 admin_queue->running_state = false;
623 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
630 admin_queue->ena_dev->ena_min_poll_delay_us);
633 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
634 ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
635 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
636 admin_queue->stats.aborted_cmd++;
637 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
642 ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
643 admin_queue->ena_dev, "Invalid comp status %d\n",
644 comp_ctx->status);
646 ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
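This polling path arms a deadline with ENA_GET_SYSTEM_TIMEOUT, repeatedly drains the admin completion queue, sleeps with an exponential backoff seeded from ena_min_poll_delay_us, and gives up once ENA_TIME_EXPIRE reports the deadline passed. A generic poll-until-done-or-timeout helper in the same spirit is sketched below; usec_now(), the backoff cap, and usleep() are assumptions standing in for the driver's ENA_* macros, not its actual implementation.

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>
    #include <unistd.h>

    /* Assumed helper: monotonic time in microseconds. */
    static uint64_t usec_now(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000ull + (uint64_t)ts.tv_nsec / 1000;
    }

    /* Poll cond() until it returns true or timeout_us elapses, backing off
     * exponentially from min_delay_us. Returns 0 on success, -1 on timeout. */
    static int poll_with_timeout(bool (*cond)(void *), void *arg,
                                 uint64_t timeout_us, uint64_t min_delay_us)
    {
            uint64_t deadline = usec_now() + timeout_us;
            unsigned int exp = 0;

            while (!cond(arg)) {
                    if (usec_now() > deadline)
                            return -1;
                    /* cap the backoff so one sleep never dwarfs the timeout */
                    usleep(min_delay_us << (exp < 10 ? exp++ : 10));
            }
            return 0;
    }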
661 struct ena_admin_set_feat_cmd cmd;
663 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
666 memset(&cmd, 0x0, sizeof(cmd));
667 admin_queue = &ena_dev->admin_queue;
669 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
670 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
672 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
673 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
674 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
675 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
677 cmd.u.llq.accel_mode.u.set.enabled_flags =
682 (struct ena_admin_aq_entry *)&cmd,
683 sizeof(cmd),
697 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
704 supported_feat = llq_features->header_location_ctrl_supported;
706 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
707 llq_info->header_location_ctrl =
708 llq_default_cfg->llq_header_location;
715 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
716 supported_feat = llq_features->descriptors_stride_ctrl_supported;
717 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
718 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
721 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
723 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
731 llq_default_cfg->llq_stride_ctrl,
733 llq_info->desc_stride_ctrl);
736 llq_info->desc_stride_ctrl = 0;
739 supported_feat = llq_features->entry_size_ctrl_supported;
740 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
741 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
742 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
745 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
746 llq_info->desc_list_entry_size = 128;
748 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
749 llq_info->desc_list_entry_size = 192;
751 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
752 llq_info->desc_list_entry_size = 256;
760 llq_default_cfg->llq_ring_entry_size,
762 llq_info->desc_list_entry_size);
764 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
769 llq_info->desc_list_entry_size);
773 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
774 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
777 llq_info->descs_per_entry = 1;
779 supported_feat = llq_features->desc_num_before_header_supported;
780 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
781 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
784 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
786 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
788 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
790 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
798 llq_default_cfg->llq_num_decs_before_header,
800 llq_info->descs_num_before_header);
803 llq_accel_mode_get = llq_features->accel_mode.u.get;
805 llq_info->disable_meta_caching =
810 llq_info->max_entries_in_tx_burst =
812 llq_default_cfg->llq_ring_entry_size_value;
827 ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
828 admin_queue->completion_timeout);
832 * 1) No completion (timeout reached)
833 * 2) There is a completion, but the driver didn't receive the MSI-X interrupt.
835 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
836 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
838 admin_queue->stats.no_completion++;
839 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
841 if (comp_ctx->status == ENA_CMD_COMPLETED) {
842 admin_queue->is_missing_admin_interrupt = true;
843 ena_trc_err(admin_queue->ena_dev,
844 "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
845 comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
847 if (admin_queue->auto_polling)
848 admin_queue->polling = true;
850 ena_trc_err(admin_queue->ena_dev,
851 "The ena device didn't send a completion for the admin cmd %d status %d\n",
852 comp_ctx->cmd_opcode, comp_ctx->status);
858 if (!admin_queue->polling) {
859 admin_queue->running_state = false;
863 } else if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
864 ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
865 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
866 admin_queue->stats.aborted_cmd++;
867 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
872 ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
873 admin_queue->ena_dev, "Invalid comp status %d\n",
874 comp_ctx->status);
876 ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
884 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
888 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
890 mmio_read->read_resp;
893 u32 timeout = mmio_read->reg_read_to;
897 if (timeout == 0)
898 timeout = ENA_REG_READ_TIMEOUT;
901 if (!mmio_read->readless_supported)
902 return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
904 ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
905 mmio_read->seq_num++;
907 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
910 mmio_read_reg |= mmio_read->seq_num &
913 ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
914 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
916 for (i = 0; i < timeout; i++) {
917 if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
923 if (unlikely(i == timeout)) {
924 ena_trc_err(ena_dev, "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
925 mmio_read->seq_num,
927 read_resp->req_id,
928 read_resp->reg_off);
933 if (unlikely(read_resp->reg_off != offset)) {
937 ret = read_resp->reg_val;
940 ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
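The readless register read above is a sequence-number handshake: the driver bumps seq_num, poisons the DMA response buffer's req_id (seq_num + 0xDEAD) so a stale answer cannot match, posts the request through the MMIO_REG_READ register, then polls the response buffer for up to `timeout` iterations waiting for the device to echo the sequence number back. A simplified sketch of that handshake follows; the struct layout, iteration count, and failure value are illustrative, not the real ena_admin definitions.

    #include <stdint.h>

    /* Hypothetical response buffer the device fills in over DMA; the field
     * names mirror the fragments above but the layout is illustrative. */
    struct reg_read_resp {
            volatile uint16_t req_id;
            uint16_t reg_off;
            uint32_t reg_val;
    };

    #define READ_TIMEOUT_ITERS 200000u
    #define READ_FAILED_VAL    0xffffffffu  /* stand-in for ENA_MMIO_READ_TIMEOUT */

    /* Issue one readless register read: post the request with a fresh sequence
     * number, then spin until the device echoes that number back or we give up. */
    static uint32_t readless_reg_read(struct reg_read_resp *resp, uint16_t *seq_num,
                                      uint16_t offset,
                                      void (*post_request)(uint16_t seq, uint16_t off))
    {
            uint32_t i;

            (*seq_num)++;
            /* poison req_id so a stale response cannot be mistaken for this one */
            resp->req_id = (uint16_t)(*seq_num + 0xDEAD);

            post_request(*seq_num, offset);

            for (i = 0; i < READ_TIMEOUT_ITERS; i++) {
                    if (resp->req_id == *seq_num)
                            break;
            }

            if (i == READ_TIMEOUT_ITERS || resp->reg_off != offset)
                    return READ_FAILED_VAL;  /* timeout or response/offset mismatch */

            return resp->reg_val;
    }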
946 * Polling mode - wait until the completion is available.
947 * Async mode - wait on wait queue until the completion is ready
948 * (or the timeout expired).
955 if (admin_queue->polling)
966 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
974 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
983 destroy_cmd.sq.sq_idx = io_sq->idx;
1004 if (io_cq->cdesc_addr.virt_addr) {
1005 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
1007 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1009 io_cq->cdesc_addr.virt_addr,
1010 io_cq->cdesc_addr.phys_addr,
1011 io_cq->cdesc_addr.mem_handle);
1013 io_cq->cdesc_addr.virt_addr = NULL;
1016 if (io_sq->desc_addr.virt_addr) {
1017 size = io_sq->desc_entry_size * io_sq->q_depth;
1019 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1021 io_sq->desc_addr.virt_addr,
1022 io_sq->desc_addr.phys_addr,
1023 io_sq->desc_addr.mem_handle);
1025 io_sq->desc_addr.virt_addr = NULL;
1028 if (io_sq->bounce_buf_ctrl.base_buffer) {
1029 ENA_MEM_FREE(ena_dev->dmadev,
1030 io_sq->bounce_buf_ctrl.base_buffer,
1031 (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
1032 io_sq->bounce_buf_ctrl.base_buffer = NULL;
1036 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
1042 /* Convert timeout from resolution of 100ms to us resolution. */
1043 timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);
1049 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1060 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1071 !(ena_dev->supported_features & feature_mask))
1094 admin_queue = &ena_dev->admin_queue;
1146 return ena_dev->rss.hash_func;
1152 (ena_dev->rss).hash_key;
1154 ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
1158 hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
1163 struct ena_rss *rss = &ena_dev->rss;
1168 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1169 sizeof(*rss->hash_key),
1170 rss->hash_key,
1171 rss->hash_key_dma_addr,
1172 rss->hash_key_mem_handle);
1174 if (unlikely(!rss->hash_key))
1182 struct ena_rss *rss = &ena_dev->rss;
1184 if (rss->hash_key)
1185 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1186 sizeof(*rss->hash_key),
1187 rss->hash_key,
1188 rss->hash_key_dma_addr,
1189 rss->hash_key_mem_handle);
1190 rss->hash_key = NULL;
1195 struct ena_rss *rss = &ena_dev->rss;
1197 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1198 sizeof(*rss->hash_ctrl),
1199 rss->hash_ctrl,
1200 rss->hash_ctrl_dma_addr,
1201 rss->hash_ctrl_mem_handle);
1203 if (unlikely(!rss->hash_ctrl))
1211 struct ena_rss *rss = &ena_dev->rss;
1213 if (rss->hash_ctrl)
1214 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1215 sizeof(*rss->hash_ctrl),
1216 rss->hash_ctrl,
1217 rss->hash_ctrl_dma_addr,
1218 rss->hash_ctrl_mem_handle);
1219 rss->hash_ctrl = NULL;
1225 struct ena_rss *rss = &ena_dev->rss;
1247 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1249 rss->rss_ind_tbl,
1250 rss->rss_ind_tbl_dma_addr,
1251 rss->rss_ind_tbl_mem_handle);
1252 if (unlikely(!rss->rss_ind_tbl))
1256 rss->host_rss_ind_tbl =
1257 ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
1258 if (unlikely(!rss->host_rss_ind_tbl))
1261 rss->tbl_log_size = log_size;
1269 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1271 rss->rss_ind_tbl,
1272 rss->rss_ind_tbl_dma_addr,
1273 rss->rss_ind_tbl_mem_handle);
1274 rss->rss_ind_tbl = NULL;
1276 rss->tbl_log_size = 0;
1282 struct ena_rss *rss = &ena_dev->rss;
1283 size_t tbl_size = (1ULL << rss->tbl_log_size) *
1286 if (rss->rss_ind_tbl)
1287 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1289 rss->rss_ind_tbl,
1290 rss->rss_ind_tbl_dma_addr,
1291 rss->rss_ind_tbl_mem_handle);
1292 rss->rss_ind_tbl = NULL;
1294 if (rss->host_rss_ind_tbl)
1295 ENA_MEM_FREE(ena_dev->dmadev,
1296 rss->host_rss_ind_tbl,
1297 ((1ULL << rss->tbl_log_size) * sizeof(u16)));
1298 rss->host_rss_ind_tbl = NULL;
1304 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1314 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1323 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1334 create_cmd.sq_depth = io_sq->q_depth;
1336 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1339 io_sq->desc_addr.phys_addr);
1356 io_sq->idx = cmd_completion.sq_idx;
1358 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1361 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1362 io_sq->desc_addr.pbuf_dev_addr =
1363 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1367 ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1374 struct ena_rss *rss = &ena_dev->rss;
1379 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1380 qid = rss->host_rss_ind_tbl[i];
1384 io_sq = &ena_dev->io_sq_queues[qid];
1386 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1389 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1398 u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1406 ena_dev->intr_moder_rx_interval =
1407 ena_dev->intr_moder_rx_interval *
1412 ena_dev->intr_moder_tx_interval =
1413 ena_dev->intr_moder_tx_interval *
1417 ena_dev->intr_delay_resolution = intr_delay_resolution;
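These lines rescale the stored Rx/Tx moderation intervals when the device reports a new interrupt delay resolution: the intervals are kept in units of the resolution, so the count is multiplied by the previous resolution (and, in the full function, presumably divided by the new one) so that the configured wall-clock delay stays constant. A hedged sketch of that conversion, with a worked example in the comment:

    #include <stdint.h>

    /* Rescale an interval stored in "resolution units": e.g. 64 units at a
     * 1 us resolution is 64 us, which becomes 16 units once the resolution
     * grows to 4 us. old_res/new_res are the previous and new resolutions. */
    static uint32_t rescale_interval(uint32_t interval, uint16_t old_res,
                                     uint16_t new_res)
    {
            if (!new_res)
                    return interval;  /* caller is expected to substitute a default */

            return interval * old_res / new_res;
    }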
1425 struct ena_admin_aq_entry *cmd,
1433 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1438 ena_trc_dbg(admin_queue->ena_dev,
1442 ena_trc_err(admin_queue->ena_dev,
1451 if (admin_queue->running_state)
1452 ena_trc_err(admin_queue->ena_dev,
1455 ena_trc_dbg(admin_queue->ena_dev,
1464 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1473 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1478 create_cmd.msix_vector = io_cq->msix_vector;
1479 create_cmd.cq_depth = io_cq->q_depth;
1483 io_cq->cdesc_addr.phys_addr);
1499 io_cq->idx = cmd_completion.cq_idx;
1501 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1505 io_cq->numa_node_cfg_reg =
1506 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1509 ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1524 *io_sq = &ena_dev->io_sq_queues[qid];
1525 *io_cq = &ena_dev->io_cq_queues[qid];
1532 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1536 if (!admin_queue->comp_ctx)
1539 for (i = 0; i < admin_queue->q_depth; i++) {
1544 comp_ctx->status = ENA_CMD_ABORTED;
1546 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
1552 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1556 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1557 while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
1558 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1559 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1560 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1562 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1568 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1575 destroy_cmd.cq_idx = io_cq->idx;
1592 return ena_dev->admin_queue.running_state;
1597 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1600 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1601 ena_dev->admin_queue.running_state = state;
1602 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1607 u16 depth = ena_dev->aenq.q_depth;
1609 ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n");
1614 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1620 struct ena_admin_set_feat_cmd cmd;
1638 memset(&cmd, 0x0, sizeof(cmd));
1639 admin_queue = &ena_dev->admin_queue;
1641 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1642 cmd.aq_common_descriptor.flags = 0;
1643 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1644 cmd.u.aenq.enabled_groups = groups_flag;
1647 (struct ena_admin_aq_entry *)&cmd,
1648 sizeof(cmd),
1664 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1678 ena_dev->dma_addr_bits = width;
1698 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1724 return -1;
1735 if (!admin_queue->comp_ctx)
1739 ENA_MEM_FREE(ena_dev->dmadev,
1740 admin_queue->comp_ctx,
1741 (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
1743 admin_queue->comp_ctx = NULL;
1748 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1749 struct ena_com_admin_cq *cq = &admin_queue->cq;
1750 struct ena_com_admin_sq *sq = &admin_queue->sq;
1751 struct ena_com_aenq *aenq = &ena_dev->aenq;
1756 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1757 if (sq->entries)
1758 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
1759 sq->dma_addr, sq->mem_handle);
1760 sq->entries = NULL;
1762 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1763 if (cq->entries)
1764 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
1765 cq->dma_addr, cq->mem_handle);
1766 cq->entries = NULL;
1768 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1769 if (ena_dev->aenq.entries)
1770 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
1771 aenq->dma_addr, aenq->mem_handle);
1772 aenq->entries = NULL;
1773 ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
1783 ENA_REG_WRITE32(ena_dev->bus, mask_value,
1784 ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1785 ena_dev->admin_queue.polling = polling;
1790 return ena_dev->admin_queue.polling;
1796 ena_dev->admin_queue.auto_polling = polling;
1806 struct ena_com_phc_info *phc = &ena_dev->phc;
1811 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1812 sizeof(*phc->virt_addr),
1813 phc->virt_addr,
1814 phc->phys_addr,
1815 phc->mem_handle);
1816 if (unlikely(!phc->virt_addr))
1819 ENA_SPINLOCK_INIT(phc->lock);
1821 phc->virt_addr->req_id = 0;
1822 phc->virt_addr->timestamp = 0;
1829 struct ena_com_phc_info *phc = &ena_dev->phc;
1854 phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset;
1856 /* Update PHC expire timeout according to device or default driver value */
1857 phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ?
1861 /* Update PHC block timeout according to device or default driver value */
1862 phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ?
1866 /* Sanity check - expire timeout must not exceed block timeout */
1867 if (phc->expire_timeout_usec > phc->block_timeout_usec)
1868 phc->expire_timeout_usec = phc->block_timeout_usec;
1874 set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr);
1875 ret = ena_com_mem_addr_set(ena_dev, &set_feat_cmd.u.phc.output_address, phc->phys_addr);
1882 ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
1893 phc->active = true;
1901 struct ena_com_phc_info *phc = &ena_dev->phc;
1905 if (!phc->virt_addr)
1908 ENA_SPINLOCK_LOCK(phc->lock, flags);
1909 phc->active = false;
1910 ENA_SPINLOCK_UNLOCK(phc->lock, flags);
1912 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1913 sizeof(*phc->virt_addr),
1914 phc->virt_addr,
1915 phc->phys_addr,
1916 phc->mem_handle);
1917 phc->virt_addr = NULL;
1919 ENA_SPINLOCK_DESTROY(phc->lock);
1924 volatile struct ena_admin_phc_resp *read_resp = ena_dev->phc.virt_addr;
1926 struct ena_com_phc_info *phc = &ena_dev->phc;
1932 if (!phc->active) {
1937 ENA_SPINLOCK_LOCK(phc->lock, flags);
1940 if (unlikely(ENA_TIME_COMPARE_HIGH_RES(phc->system_time, zero_system_time))) {
1942 block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time,
1943 phc->block_timeout_usec);
1946 phc->stats.phc_skp++;
1952 if ((READ_ONCE16(read_resp->req_id) != phc->req_id) ||
1953 (read_resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
1957 phc->stats.phc_err++;
1960 phc->stats.phc_exp++;
1965 phc->system_time = ENA_GET_SYSTEM_TIME_HIGH_RES();
1966 block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time, phc->block_timeout_usec);
1967 expire_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time, phc->expire_timeout_usec);
1970 phc->req_id++;
1975 read_resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET;
1978 ENA_REG_WRITE32(ena_dev->bus, phc->req_id, ena_dev->reg_bar + phc->doorbell_offset);
1987 phc->error_bound = ENA_PHC_MAX_ERROR_BOUND;
1993 if (READ_ONCE16(read_resp->req_id) != phc->req_id) {
2002 if (unlikely(read_resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
2007 phc->error_bound = ENA_PHC_MAX_ERROR_BOUND;
2013 *timestamp = read_resp->timestamp;
2016 phc->error_bound = read_resp->error_bound;
2019 phc->stats.phc_cnt++;
2022 phc->system_time = zero_system_time;
2027 ENA_SPINLOCK_UNLOCK(phc->lock, flags);
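The PHC fragments use two timeouts: expire_timeout_usec bounds how long a single timestamp request is polled and trusted, while block_timeout_usec suppresses new requests for a while after one is issued, and the expire value is clamped so it never exceeds the block value. A small sketch of that relationship, with assumed field and helper names (now_us() is not a driver API):

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed helper, not a driver API: monotonic time in microseconds. */
    extern uint64_t now_us(void);

    struct phc_timeouts {
            uint64_t expire_timeout_usec;  /* how long one request is polled/trusted */
            uint64_t block_timeout_usec;   /* back-off before the next request */
            uint64_t blocked_until_usec;   /* 0 means no request in flight */
    };

    /* Pick device-provided values when present, fall back to driver defaults,
     * and clamp so a request never outlives the block window. */
    static void phc_apply_timeouts(struct phc_timeouts *t,
                                   uint64_t dev_expire_us, uint64_t dev_block_us,
                                   uint64_t def_expire_us, uint64_t def_block_us)
    {
            t->expire_timeout_usec = dev_expire_us ? dev_expire_us : def_expire_us;
            t->block_timeout_usec  = dev_block_us ? dev_block_us : def_block_us;
            if (t->expire_timeout_usec > t->block_timeout_usec)
                    t->expire_timeout_usec = t->block_timeout_usec;
    }

    /* Returns true if a new timestamp request may be issued now; when it may,
     * the block window is re-armed (the real driver also counts skips). */
    static bool phc_may_issue(struct phc_timeouts *t)
    {
            if (t->blocked_until_usec && now_us() < t->blocked_until_usec)
                    return false;

            t->blocked_until_usec = now_us() + t->block_timeout_usec;
            return true;
    }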
2034 struct ena_com_phc_info *phc = &ena_dev->phc;
2035 u32 local_error_bound = phc->error_bound;
2037 if (!phc->active) {
2052 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2054 ENA_SPINLOCK_INIT(mmio_read->lock);
2055 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2056 sizeof(*mmio_read->read_resp),
2057 mmio_read->read_resp,
2058 mmio_read->read_resp_dma_addr,
2059 mmio_read->read_resp_mem_handle);
2060 if (unlikely(!mmio_read->read_resp))
2065 mmio_read->read_resp->req_id = 0x0;
2066 mmio_read->seq_num = 0x0;
2067 mmio_read->readless_supported = true;
2072 ENA_SPINLOCK_DESTROY(mmio_read->lock);
2078 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2080 mmio_read->readless_supported = readless_supported;
2085 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2087 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
2088 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
2090 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2091 sizeof(*mmio_read->read_resp),
2092 mmio_read->read_resp,
2093 mmio_read->read_resp_dma_addr,
2094 mmio_read->read_resp_mem_handle);
2096 mmio_read->read_resp = NULL;
2097 ENA_SPINLOCK_DESTROY(mmio_read->lock);
2102 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2105 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
2106 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
2108 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
2109 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
2115 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2122 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
2131 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
2133 admin_queue->bus = ena_dev->bus;
2134 admin_queue->q_dmadev = ena_dev->dmadev;
2135 admin_queue->polling = false;
2136 admin_queue->curr_cmd_id = 0;
2138 ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
2140 ENA_SPINLOCK_INIT(admin_queue->q_lock);
2154 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
2157 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
2158 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
2160 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
2161 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
2163 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
2164 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
2166 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
2167 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
2170 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
2176 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
2181 ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
2182 ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
2187 admin_queue->ena_dev = ena_dev;
2188 admin_queue->running_state = true;
2189 admin_queue->is_missing_admin_interrupt = false;
2205 if (unlikely(ctx->qid >= ENA_TOTAL_NUM_QUEUES)) {
2207 ctx->qid, ENA_TOTAL_NUM_QUEUES);
2211 io_sq = &ena_dev->io_sq_queues[ctx->qid];
2212 io_cq = &ena_dev->io_cq_queues[ctx->qid];
2218 io_cq->q_depth = ctx->queue_size;
2219 io_cq->direction = ctx->direction;
2220 io_cq->qid = ctx->qid;
2222 io_cq->msix_vector = ctx->msix_vector;
2224 io_sq->q_depth = ctx->queue_size;
2225 io_sq->direction = ctx->direction;
2226 io_sq->qid = ctx->qid;
2228 io_sq->mem_queue_type = ctx->mem_queue_type;
2230 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
2232 io_sq->tx_max_header_size =
2233 ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
2246 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
2270 io_sq = &ena_dev->io_sq_queues[qid];
2271 io_cq = &ena_dev->io_cq_queues[qid];
2289 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2290 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2294 admin_queue = &ena_dev->admin_queue;
2296 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2297 get_cmd->aq_common_descriptor.flags = 0;
2298 get_cmd->type = type;
2318 customer_metrics = &ena_dev->customer_metrics;
2320 customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
2328 customer_metrics->supported_metrics =
2345 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
2348 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
2349 ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
2351 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2361 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
2363 ena_dev->tx_max_header_size =
2368 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
2370 ena_dev->tx_max_header_size =
2382 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
2390 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2399 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2402 memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
2409 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2412 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2423 ena_com_handle_admin_completion(&ena_dev->admin_queue);
2432 struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2434 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2435 return aenq_handlers->handlers[group];
2437 return aenq_handlers->unimplemented_handler;
2448 struct ena_com_aenq *aenq = &ena_dev->aenq;
2454 masked_head = aenq->head & (aenq->q_depth - 1);
2455 phase = aenq->phase;
2456 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2457 aenq_common = &aenq_e->aenq_common_desc;
2460 while ((READ_ONCE8(aenq_common->flags) &
2467 timestamp = (u64)aenq_common->timestamp_low |
2468 ((u64)aenq_common->timestamp_high << 32);
2471 aenq_common->group,
2472 aenq_common->syndrome,
2477 aenq_common->group);
2484 if (unlikely(masked_head == aenq->q_depth)) {
2488 aenq_e = &aenq->entries[masked_head];
2489 aenq_common = &aenq_e->aenq_common_desc;
2492 aenq->head += processed;
2493 aenq->phase = phase;
2501 ENA_REG_WRITE32_RELAXED(ena_dev->bus, (u32)aenq->head,
2502 ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
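The AENQ loop above is the consumer side of a phase-bit ring: an entry is considered new while its phase flag matches the expected phase, the head runs freely, the masked index wraps at q_depth and flips the expected phase, and the updated head is written back to the AENQ head doorbell. A simplified sketch of that drain loop (descriptor layout, barrier handling, and names are illustrative):

    #include <stdint.h>

    /* Illustrative event descriptor: bit 0 of flags carries the phase. */
    struct evt_desc {
            volatile uint8_t flags;
            uint8_t payload[31];
    };

    struct evt_ring {
            struct evt_desc *entries;
            uint16_t q_depth;   /* power of two */
            uint16_t head;      /* free-running consumer index */
            uint8_t  phase;     /* phase value that marks a new entry */
    };

    static unsigned int evt_ring_drain(struct evt_ring *r,
                                       void (*handle)(const struct evt_desc *d))
    {
            uint16_t masked = r->head & (r->q_depth - 1);
            uint8_t phase = r->phase;
            unsigned int processed = 0;

            /* an entry is new only while its phase bit matches the expected phase;
             * the real code also issues a read barrier before touching the payload */
            while ((r->entries[masked].flags & 0x1) == phase) {
                    handle(&r->entries[masked]);

                    processed++;
                    if (++masked == r->q_depth) {  /* wrapped: flip expected phase */
                            masked = 0;
                            phase = !phase;
                    }
            }

            r->head += processed;
            r->phase = phase;
            /* the caller would now write r->head to the head doorbell register */
            return processed;
    }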
2509 struct ena_com_aenq *aenq = &ena_dev->aenq;
2511 u8 phase = aenq->phase;
2514 masked_head = aenq->head & (aenq->q_depth - 1);
2515 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2516 aenq_common = &aenq_e->aenq_common_desc;
2519 while ((READ_ONCE8(aenq_common->flags) &
2526 if (aenq_common->group == ENA_ADMIN_KEEP_ALIVE)
2532 if (unlikely(masked_head == aenq->q_depth)) {
2537 aenq_e = &aenq->entries[masked_head];
2538 aenq_common = &aenq_e->aenq_common_desc;
2558 ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
2559 ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
2570 u32 stat, timeout, cap, reset_val;
2578 ena_trc_err(ena_dev, "Reg read32 timeout occurred\n");
2587 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2589 if (timeout == 0) {
2590 ena_trc_err(ena_dev, "Invalid timeout value\n");
2598 * bits 24-27 as MSB, bits 28-31 as LSB
2618 ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2623 rc = wait_for_reset_state(ena_dev, timeout,
2631 ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2632 rc = wait_for_reset_state(ena_dev, timeout, 0);
2638 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2640 if (timeout)
2641 /* the resolution of timeout reg is 100ms */
2642 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2644 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
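Here the admin command completion timeout is pulled out of the capabilities register, where it is encoded in units of 100 ms, and converted to microseconds, with a compile-time default used when the field is zero. A sketch of that decode with placeholder mask/shift/default values (not the real ENA_REGS_* constants):

    #include <stdint.h>

    /* Placeholder field definitions, not the real ENA_REGS_* constants. */
    #define CAPS_ADMIN_CMD_TO_MASK    0x000f0000u
    #define CAPS_ADMIN_CMD_TO_SHIFT   16
    #define DEFAULT_ADMIN_TIMEOUT_US  (3 * 1000 * 1000)

    /* The register field counts in units of 100 ms; convert to microseconds.
     * A field value of 5 therefore yields 5 * 100 * 1000 = 500000 us. */
    static uint32_t admin_timeout_us_from_caps(uint32_t cap)
    {
            uint32_t timeout = (cap & CAPS_ADMIN_CMD_TO_MASK) >> CAPS_ADMIN_CMD_TO_SHIFT;

            return timeout ? timeout * 100u * 1000u : DEFAULT_ADMIN_TIMEOUT_US;
    }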
2715 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
2723 &get_cmd->u.control_buffer.address,
2729 get_cmd->u.control_buffer.length = len;
2731 get_cmd->device_id = ena_dev->stats_func;
2732 get_cmd->queue_idx = ena_dev->stats_queue;
2742 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
2755 if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
2765 if (!ena_dev->customer_metrics.supported_metrics) {
2773 &get_cmd->u.control_buffer.address,
2774 ena_dev->customer_metrics.buffer_dma_addr);
2780 get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
2781 get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
2784 memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
2794 struct ena_admin_set_feat_cmd cmd;
2803 memset(&cmd, 0x0, sizeof(cmd));
2804 admin_queue = &ena_dev->admin_queue;
2806 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2807 cmd.aq_common_descriptor.flags = 0;
2808 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2809 cmd.u.mtu.mtu = mtu;
2812 (struct ena_admin_aq_entry *)&cmd,
2813 sizeof(cmd),
2843 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2844 struct ena_rss *rss = &ena_dev->rss;
2845 struct ena_admin_set_feat_cmd cmd;
2863 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2865 rss->hash_func);
2869 memset(&cmd, 0x0, sizeof(cmd));
2871 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2872 cmd.aq_common_descriptor.flags =
2874 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2875 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2876 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2879 &cmd.control_buffer.address,
2880 rss->hash_key_dma_addr);
2886 cmd.control_buffer.length = sizeof(*rss->hash_key);
2889 (struct ena_admin_aq_entry *)&cmd,
2890 sizeof(cmd),
2895 rss->hash_func, ret);
2909 struct ena_rss *rss = &ena_dev->rss;
2912 hash_key = rss->hash_key;
2920 rss->hash_key_dma_addr,
2921 sizeof(*rss->hash_key), 0);
2931 if (key_len != sizeof(hash_key->key)) {
2933 key_len, sizeof(hash_key->key));
2936 memcpy(hash_key->key, key, key_len);
2937 hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
2940 rss->hash_init_val = init_val;
2941 old_func = rss->hash_func;
2942 rss->hash_func = func;
2947 rss->hash_func = old_func;
2955 struct ena_rss *rss = &ena_dev->rss;
2964 rss->hash_key_dma_addr,
2965 sizeof(*rss->hash_key), 0);
2970 rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
2971 if (rss->hash_func)
2972 rss->hash_func--;
2974 *func = rss->hash_func;
2982 ena_dev->rss.hash_key;
2985 memcpy(key, hash_key->key,
2986 (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
2995 struct ena_rss *rss = &ena_dev->rss;
3001 rss->hash_ctrl_dma_addr,
3002 sizeof(*rss->hash_ctrl), 0);
3007 *fields = rss->hash_ctrl->selected_fields[proto].fields;
3014 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
3015 struct ena_rss *rss = &ena_dev->rss;
3016 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
3017 struct ena_admin_set_feat_cmd cmd;
3028 memset(&cmd, 0x0, sizeof(cmd));
3030 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
3031 cmd.aq_common_descriptor.flags =
3033 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
3034 cmd.u.flow_hash_input.enabled_input_sort =
3039 &cmd.control_buffer.address,
3040 rss->hash_ctrl_dma_addr);
3045 cmd.control_buffer.length = sizeof(*hash_ctrl);
3048 (struct ena_admin_aq_entry *)&cmd,
3049 sizeof(cmd),
3060 struct ena_rss *rss = &ena_dev->rss;
3062 rss->hash_ctrl;
3071 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
3075 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
3079 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
3083 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
3087 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
3090 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
3093 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
3096 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
3100 available_fields = hash_ctrl->selected_fields[i].fields &
3101 hash_ctrl->supported_fields[i].fields;
3102 if (available_fields != hash_ctrl->selected_fields[i].fields) {
3104 i, hash_ctrl->supported_fields[i].fields,
3105 hash_ctrl->selected_fields[i].fields);
3123 struct ena_rss *rss = &ena_dev->rss;
3124 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
3139 supported_fields = hash_ctrl->supported_fields[proto].fields;
3145 hash_ctrl->selected_fields[proto].fields = hash_fields;
3159 struct ena_rss *rss = &ena_dev->rss;
3161 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
3167 rss->host_rss_ind_tbl[entry_idx] = entry_value;
3174 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
3175 struct ena_rss *rss = &ena_dev->rss;
3176 struct ena_admin_set_feat_cmd cmd;
3193 memset(&cmd, 0x0, sizeof(cmd));
3195 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
3196 cmd.aq_common_descriptor.flags =
3198 cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
3199 cmd.u.ind_table.size = rss->tbl_log_size;
3200 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
3203 &cmd.control_buffer.address,
3204 rss->rss_ind_tbl_dma_addr);
3210 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
3214 (struct ena_admin_aq_entry *)&cmd,
3215 sizeof(cmd),
3227 struct ena_rss *rss = &ena_dev->rss;
3232 tbl_size = (1ULL << rss->tbl_log_size) *
3237 rss->rss_ind_tbl_dma_addr,
3245 for (i = 0; i < (1 << rss->tbl_log_size); i++)
3246 ind_tbl[i] = rss->host_rss_ind_tbl[i];
3255 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
3292 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
3297 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3299 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
3301 host_attr->host_info,
3302 host_attr->host_info_dma_addr,
3303 host_attr->host_info_dma_handle);
3304 if (unlikely(!host_attr->host_info))
3307 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
3317 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3319 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
3321 host_attr->debug_area_virt_addr,
3322 host_attr->debug_area_dma_addr,
3323 host_attr->debug_area_dma_handle);
3324 if (unlikely(!host_attr->debug_area_virt_addr)) {
3325 host_attr->debug_area_size = 0;
3329 host_attr->debug_area_size = debug_area_size;
3336 struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
3338 customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
3339 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
3340 customer_metrics->buffer_len,
3341 customer_metrics->buffer_virt_addr,
3342 customer_metrics->buffer_dma_addr,
3343 customer_metrics->buffer_dma_handle);
3344 if (unlikely(!customer_metrics->buffer_virt_addr))
3352 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3354 if (host_attr->host_info) {
3355 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
3357 host_attr->host_info,
3358 host_attr->host_info_dma_addr,
3359 host_attr->host_info_dma_handle);
3360 host_attr->host_info = NULL;
3366 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3368 if (host_attr->debug_area_virt_addr) {
3369 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
3370 host_attr->debug_area_size,
3371 host_attr->debug_area_virt_addr,
3372 host_attr->debug_area_dma_addr,
3373 host_attr->debug_area_dma_handle);
3374 host_attr->debug_area_virt_addr = NULL;
3380 struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
3382 if (customer_metrics->buffer_virt_addr) {
3383 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
3384 customer_metrics->buffer_len,
3385 customer_metrics->buffer_virt_addr,
3386 customer_metrics->buffer_dma_addr,
3387 customer_metrics->buffer_dma_handle);
3388 customer_metrics->buffer_virt_addr = NULL;
3394 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3396 struct ena_admin_set_feat_cmd cmd;
3405 memset(&cmd, 0x0, sizeof(cmd));
3406 admin_queue = &ena_dev->admin_queue;
3408 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
3409 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
3412 &cmd.u.host_attr.debug_ba,
3413 host_attr->debug_area_dma_addr);
3420 &cmd.u.host_attr.os_info_ba,
3421 host_attr->host_info_dma_addr);
3427 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
3430 (struct ena_admin_aq_entry *)&cmd,
3431 sizeof(cmd),
3468 ena_dev->intr_delay_resolution,
3469 &ena_dev->intr_moder_tx_interval);
3477 ena_dev->intr_delay_resolution,
3478 &ena_dev->intr_moder_rx_interval);
3497 "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
3509 /* Disable adaptive moderation by default - can be enabled later */
3517 return ena_dev->intr_moder_tx_interval;
3522 return ena_dev->intr_moder_rx_interval;
3529 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
3532 if (!llq_features->max_llq_num) {
3533 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3541 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
3542 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
3544 if (unlikely(ena_dev->tx_max_header_size == 0)) {
3549 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;