/* SPDX-License-Identifier: MIT
 * Google Virtual Ethernet (gve) driver
 * Copyright (C) 2015-2022 Google, Inc.
 */

#include "../gve_ethdev.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_MAX_ADMINQ_RELEASE_CHECK	500
#define GVE_ADMINQ_SLEEP_LEN		20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK	100

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error: Expected: length=%d, feature_mask=%x. Actual: length=%d, feature_mask=%x."

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver."

static void gve_set_min_desc_cnt(struct gve_priv *priv,
	struct gve_device_option_modify_ring *dev_op_modify_ring);

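/* Return the device option that follows 'option' in the descriptor's option
 * list, or NULL if stepping past it would run beyond the descriptor's
 * total_length.
 */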
static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
					      struct gve_device_option *option)
{
	uintptr_t option_end, descriptor_end;

	option_end = (uintptr_t)option + sizeof(*option) + be16_to_cpu(option->option_length);
	descriptor_end = (uintptr_t)descriptor + be16_to_cpu(descriptor->total_length);

	return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}

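/* Validate a single device option against its expected length and required
 * feature mask, and record a pointer to it in the matching dev_op_* output
 * argument. Options that fail validation or are not recognized are skipped
 * without failing the describe-device flow.
 */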
static
void gve_parse_device_option(struct gve_priv *priv,
			     struct gve_device_option *option,
			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			     struct gve_device_option_modify_ring **dev_op_modify_ring,
			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
	u16 option_length = be16_to_cpu(option->option_length);
	u16 option_id = be16_to_cpu(option->option_id);

	/* If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
		if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
			PMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,
				    "Raw Addressing",
				    GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
				    GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
				    option_length, req_feat_mask);
			break;
		}

		PMD_DRV_LOG(INFO, "Gqi raw addressing device option enabled.");
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		break;
	case GVE_DEV_OPT_ID_GQI_RDA:
		if (option_length < sizeof(**dev_op_gqi_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
			PMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,
				    "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
				    GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
				    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_rda)) {
			PMD_DRV_LOG(WARNING,
				    GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
		}
		*dev_op_gqi_rda = RTE_PTR_ADD(option, sizeof(*option));
		break;
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			PMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,
				    "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
				    GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
				    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			PMD_DRV_LOG(WARNING,
				    GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
		}
		*dev_op_gqi_qpl = RTE_PTR_ADD(option, sizeof(*option));
		break;
	case GVE_DEV_OPT_ID_DQO_RDA:
		if (option_length < sizeof(**dev_op_dqo_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
			PMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,
				    "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
				    GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
				    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_rda)) {
			PMD_DRV_LOG(WARNING,
				    GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
		}
		*dev_op_dqo_rda = RTE_PTR_ADD(option, sizeof(*option));
		break;
	case GVE_DEV_OPT_ID_MODIFY_RING:
		/* Min ring size bound is optional. */
		if (option_length < (sizeof(**dev_op_modify_ring) -
			sizeof(struct gve_ring_size_bound)) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
			PMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,
				    "Modify Ring",
				    (int)sizeof(**dev_op_modify_ring),
				    GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
				    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_modify_ring)) {
			PMD_DRV_LOG(WARNING,
				    GVE_DEVICE_OPTION_TOO_BIG_FMT,
				    "Modify Ring");
		}
		*dev_op_modify_ring = RTE_PTR_ADD(option, sizeof(*option));

		/* Min ring size included; set the minimum ring size. */
		if (option_length == sizeof(**dev_op_modify_ring))
			gve_set_min_desc_cnt(priv, *dev_op_modify_ring);
		break;
	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			PMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,
				    "Jumbo Frames",
				    (int)sizeof(**dev_op_jumbo_frames),
				    GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
				    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			PMD_DRV_LOG(WARNING,
				    GVE_DEVICE_OPTION_TOO_BIG_FMT,
				    "Jumbo Frames");
		}
		*dev_op_jumbo_frames = RTE_PTR_ADD(option, sizeof(*option));
		break;
	default:
		/* If we don't recognize the option, just continue
		 * without doing anything.
		 */
		PMD_DRV_LOG(DEBUG, "Unrecognized device option 0x%hx not enabled.",
			    option_id);
	}
}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
			   struct gve_device_descriptor *descriptor,
			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			   struct gve_device_option_modify_ring **dev_op_modify_ring,
			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	const int num_options = be16_to_cpu(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = RTE_PTR_ADD(descriptor, sizeof(*descriptor));
	for (i = 0; i < num_options; i++) {
		struct gve_device_option *next_opt;

		next_opt = gve_get_next_option(descriptor, dev_opt);
		if (!next_opt) {
			PMD_DRV_LOG(ERR,
				    "options exceed device_descriptor's total length.");
			return -EINVAL;
		}

		gve_parse_device_option(priv, dev_opt,
					dev_op_gqi_rda, dev_op_gqi_qpl,
					dev_op_dqo_rda, dev_op_modify_ring,
					dev_op_jumbo_frames);
		dev_opt = next_opt;
	}

	return 0;
}

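/* Allocate one DMA page for the admin queue ring, reset the per-command
 * counters, and hand the page frame number to the device via the
 * adminq_pfn register.
 */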
int gve_adminq_alloc(struct gve_priv *priv)
{
	priv->adminq = gve_alloc_dma_mem(&priv->adminq_dma_mem, PAGE_SIZE);
	if (unlikely(!priv->adminq))
		return -ENOMEM;

	priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_report_stats_cnt = 0;
	priv->adminq_report_link_speed_cnt = 0;
	priv->adminq_get_ptype_map_cnt = 0;

	/* Set up the admin queue with the device */
	iowrite32be(priv->adminq_dma_mem.pa / PAGE_SIZE,
		    &priv->reg_bar0->adminq_pfn);

	gve_set_admin_queue_ok(priv);
	return 0;
}

void gve_adminq_release(struct gve_priv *priv)
{
	int i = 0;

	/* Tell the device the adminq is leaving */
	iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
	while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
		/* If this is reached the device is unrecoverable and still
		 * holding memory. Continue looping to avoid memory corruption,
		 * but WARN so it is visible what is going on.
		 */
		if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
			PMD_DRV_LOG(WARNING, "Unrecoverable platform error!");
		i++;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}
	gve_clear_device_rings_ok(priv);
	gve_clear_device_resources_ok(priv);
	gve_clear_admin_queue_ok(priv);
}

void gve_adminq_free(struct gve_priv *priv)
{
	if (!gve_get_admin_queue_ok(priv))
		return;
	gve_adminq_release(priv);
	gve_free_dma_mem(&priv->adminq_dma_mem);
	gve_clear_admin_queue_ok(priv);
}

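/* The doorbell register is written with the producer count to kick the
 * device; completion is detected by polling the adminq_event_counter
 * register until it catches up to that count.
 */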
static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (ioread32be(&priv->reg_bar0->adminq_event_counter)
		    == prod_cnt)
			return true;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}

	return false;
}

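/* Translate the device's admin queue status code into a negative errno.
 * Anything other than PASSED or UNSET also bumps the command-failure
 * counter.
 */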
static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		PMD_DRV_LOG(ERR, "AQ command failed with status %d", status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_UNSET:
		PMD_DRV_LOG(ERR, "parse_aq_err: err and status both unset, this should not be possible.");
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return -EAGAIN;
	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return -ETIMEDOUT;
	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return -EACCES;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -ENOTSUP;
	default:
		PMD_DRV_LOG(ERR, "parse_aq_err: unknown status code %d",
			    status);
		return -EINVAL;
	}
}

/* Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	u32 tail, head;
	u32 i;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		PMD_DRV_LOG(ERR, "AQ commands timed out, need to reset AQ");
		priv->adminq_timeouts++;
		return -ENOTRECOVERABLE;
	}

	for (i = tail; i < head; i++) {
		union gve_adminq_command *cmd;
		u32 status, err;

		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32_to_cpu(READ_ONCE32(cmd->status));
		err = gve_adminq_parse_err(priv, status);
		if (err)
			/* Return the first error if we failed. */
			return err;
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
				union gve_adminq_command *cmd_orig)
{
	union gve_adminq_command *cmd;
	u32 opcode;
	u32 tail;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);

	/* Check if next command will overflow the buffer. */
	if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
	    (tail & priv->adminq_mask)) {
		int err;

		/* Flush existing commands to make room. */
		err = gve_adminq_kick_and_wait(priv);
		if (err)
			return err;

		/* Retry. */
		tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
		if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
		    (tail & priv->adminq_mask)) {
			/* This should never happen. We just flushed the
			 * command queue so there should be enough space.
			 */
			return -ENOMEM;
		}
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
	opcode = be32_to_cpu(READ_ONCE32(cmd->opcode));

	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;
	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;
	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;
	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_CONFIGURE_RSS:
		priv->adminq_cfg_rss_cnt++;
		break;
	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;
	case GVE_ADMINQ_REPORT_STATS:
		priv->adminq_report_stats_cnt++;
		break;
	case GVE_ADMINQ_REPORT_LINK_SPEED:
		priv->adminq_report_link_speed_cnt++;
		break;
	case GVE_ADMINQ_GET_PTYPE_MAP:
		priv->adminq_get_ptype_map_cnt++;
		break;
	case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
		priv->adminq_verify_driver_compatibility_cnt++;
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown AQ command opcode %d", opcode);
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{
	u32 tail, head;
	int err;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;
	if (tail != head)
		/* This is not a valid path */
		return -EINVAL;

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err)
		return err;

	return gve_adminq_kick_and_wait(priv);
}

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last; if it is 1 then
 * the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX	0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	cmd.configure_device_resources =
		(struct gve_adminq_configure_device_resources) {
		.counter_array = cpu_to_be64(counter_array_bus_addr),
		.num_counters = cpu_to_be32(num_counters),
		.irq_db_addr = cpu_to_be64(db_array_bus_addr),
		.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
		.irq_db_stride = cpu_to_be32(sizeof(*priv->irq_dbs)),
		.ntfy_blk_msix_base_idx =
					cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
	cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
		.driver_info_len = cpu_to_be64(driver_info_len),
		.driver_info_addr = cpu_to_be64(driver_info_addr),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

	return gve_adminq_execute_cmd(priv, &cmd);
}

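/* Build a CREATE_TX_QUEUE command for the given queue. GQI formats describe
 * the queue with a queue page list id (or the raw-addressing sentinel),
 * while DQO supplies the Tx completion ring address and size instead.
 */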
static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_tx_queue *txq = priv->txqs[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.queue_resources_addr =
			cpu_to_be64(txq->qres_mz->iova),
		.tx_ring_addr = cpu_to_be64(txq->tx_ring_phys_addr),
		.ntfy_id = cpu_to_be32(txq->ntfy_id),
		.tx_ring_size = cpu_to_be16(txq->nb_tx_desc),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : txq->qpl->id;

		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
	} else {
		cmd.create_tx_queue.tx_comp_ring_addr =
			cpu_to_be64(txq->compl_ring_phys_addr);
		cmd.create_tx_queue.tx_comp_ring_size =
			cpu_to_be16(txq->sw_size);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	u32 i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

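/* Build a CREATE_RX_QUEUE command for the given queue. GQI formats point the
 * device at the descriptor and data rings plus a queue page list id, while
 * DQO uses the completion ring, the buffer ring, and the RSC setting.
 */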
static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_rx_queue *rxq = priv->rxqs[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.ntfy_id = cpu_to_be32(rxq->ntfy_id),
		.queue_resources_addr = cpu_to_be64(rxq->qres_mz->iova),
		.rx_ring_size = cpu_to_be16(rxq->nb_rx_desc),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : rxq->qpl->id;

		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rxq->mz->iova);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rxq->data_mz->iova);
		cmd.create_rx_queue.index = cpu_to_be32(queue_index);
		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rxq->rx_buf_len);
	} else {
		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rxq->compl_ring_phys_addr);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rxq->rx_ring_phys_addr);
		cmd.create_rx_queue.packet_buffer_size =
			cpu_to_be16(rxq->rx_buf_len);
		cmd.create_rx_queue.rx_buff_ring_size =
			cpu_to_be16(rxq->nb_rx_desc);
		cmd.create_rx_queue.enable_rsc = !!(priv->enable_rsc);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	u32 i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	u32 i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	u32 i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

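/* Record the default ring sizes reported by the device and reject counts
 * whose descriptor rings would not fill at least one page.
 */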
static int gve_set_desc_cnt(struct gve_priv *priv,
			    struct gve_device_descriptor *descriptor)
{
	priv->default_tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	if (priv->default_tx_desc_cnt * sizeof(priv->txqs[0]->tx_desc_ring[0])
	    < PAGE_SIZE) {
		PMD_DRV_LOG(ERR, "Tx desc count %d too low",
			    priv->default_tx_desc_cnt);
		return -EINVAL;
	}
	priv->default_rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
	if (priv->default_rx_desc_cnt * sizeof(priv->rxqs[0]->rx_desc_ring[0])
	    < PAGE_SIZE) {
		PMD_DRV_LOG(ERR, "Rx desc count %d too low", priv->default_rx_desc_cnt);
		return -EINVAL;
	}
	return 0;
}

static int
gve_set_desc_cnt_dqo(struct gve_priv *priv,
		     const struct gve_device_descriptor *descriptor,
		     const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
{
	priv->default_tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	priv->tx_compq_size = be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
	priv->default_rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
	priv->rx_bufq_size = be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);

	return 0;
}

static void
gve_set_min_desc_cnt(struct gve_priv *priv,
	struct gve_device_option_modify_ring *modify_ring)
{
	priv->min_rx_desc_cnt = be16_to_cpu(modify_ring->min_ring_size.rx);
	priv->min_tx_desc_cnt = be16_to_cpu(modify_ring->min_ring_size.tx);
}

static void
gve_set_max_desc_cnt(struct gve_priv *priv,
	const struct gve_device_option_modify_ring *modify_ring)
{
	if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
		PMD_DRV_LOG(DEBUG, "Overriding max ring size from device for DQ "
			    "queue format to 4096.");
		priv->max_rx_desc_cnt = GVE_MAX_QUEUE_SIZE_DQO;
		priv->max_tx_desc_cnt = GVE_MAX_QUEUE_SIZE_DQO;
		return;
	}
	priv->max_rx_desc_cnt = be16_to_cpu(modify_ring->max_ring_size.rx);
	priv->max_tx_desc_cnt = be16_to_cpu(modify_ring->max_ring_size.tx);
}

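/* Apply feature-specific settings for options that were both advertised in
 * the device options and present in the supported_features_mask.
 */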
static void gve_enable_supported_features(struct gve_priv *priv,
	u32 supported_features_mask,
	const struct gve_device_option_modify_ring *dev_op_modify_ring,
	const struct gve_device_option_jumbo_frames *dev_op_jumbo_frames)
{
	if (dev_op_modify_ring &&
	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
		PMD_DRV_LOG(INFO, "MODIFY RING device option enabled.");
		/* Min ring size set separately by virtue of it being optional. */
		gve_set_max_desc_cnt(priv, dev_op_modify_ring);
	}

	/* Before control reaches this point, the max MTU from the
	 * gve_device_descriptor field has already been stored in
	 * priv->max_mtu. We overwrite it with the true max MTU below.
	 */
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		PMD_DRV_LOG(INFO, "JUMBO FRAMES device option enabled.");
		priv->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
	}
}

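/* Issue DESCRIBE_DEVICE, parse the returned descriptor and its option list,
 * pick the queue format (DQO RDA, then GQI RDA, then GQI QPL), and cache the
 * device limits (ring sizes, MTU, MAC address, queue counts) in priv.
 */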
int gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
	struct gve_device_descriptor *descriptor;
	struct gve_dma_mem descriptor_dma_mem;
	u32 supported_features_mask = 0;
	union gve_adminq_command cmd;
	int err = 0;
	u16 mtu;

	memset(&cmd, 0, sizeof(cmd));
	descriptor = gve_alloc_dma_mem(&descriptor_dma_mem, PAGE_SIZE);
	if (!descriptor)
		return -ENOMEM;
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd.describe_device.device_descriptor_addr =
					cpu_to_be64(descriptor_dma_mem.pa);
	cmd.describe_device.device_descriptor_version =
			cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto free_device_descriptor;

	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
					 &dev_op_modify_ring,
					 &dev_op_jumbo_frames);
	if (err)
		goto free_device_descriptor;

	/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
	 * is not set to GqiRda, choose the queue format in a priority order:
	 * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
	 */
	if (dev_op_dqo_rda) {
		priv->queue_format = GVE_DQO_RDA_FORMAT;
		PMD_DRV_LOG(INFO, "Driver is running with DQO RDA queue format.");
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
	} else if (dev_op_gqi_rda) {
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		PMD_DRV_LOG(INFO, "Driver is running with GQI RDA queue format.");
		supported_features_mask =
			be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
	} else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
		PMD_DRV_LOG(INFO, "Driver is running with GQI RDA queue format.");
	} else {
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		if (dev_op_gqi_qpl)
			supported_features_mask =
				be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
		PMD_DRV_LOG(INFO, "Driver is running with GQI QPL queue format.");
	}
	if (gve_is_gqi(priv)) {
		err = gve_set_desc_cnt(priv, descriptor);
	} else {
		/* DQO supports LRO. */
		err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
	}
	if (err)
		goto free_device_descriptor;

	priv->max_registered_pages =
				be64_to_cpu(descriptor->max_registered_pages);
	mtu = be16_to_cpu(descriptor->mtu);
	if (mtu < ETH_MIN_MTU) {
		PMD_DRV_LOG(ERR, "MTU %d below minimum MTU", mtu);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->max_mtu = mtu;
	priv->num_event_counters = be16_to_cpu(descriptor->counters);
	rte_memcpy(priv->dev_addr.addr_bytes, descriptor->mac, ETH_ALEN);
	PMD_DRV_LOG(INFO, "MAC addr: " RTE_ETHER_ADDR_PRT_FMT,
		    RTE_ETHER_ADDR_BYTES(&priv->dev_addr));
	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);

	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);

	gve_enable_supported_features(priv, supported_features_mask,
				      dev_op_modify_ring,
				      dev_op_jumbo_frames);

free_device_descriptor:
	gve_free_dma_mem(&descriptor_dma_mem);
	return err;
}

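/* Register a queue page list with the device: the page bus addresses are
 * copied into a temporary DMA buffer in big-endian form and handed to the
 * REGISTER_PAGE_LIST command, then the buffer is freed.
 */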
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl)
{
	struct gve_dma_mem page_list_dma_mem;
	u32 num_entries = qpl->num_entries;
	u32 size = num_entries * sizeof(qpl->page_buses[0]);
	union gve_adminq_command cmd;
	__be64 *page_list;
	int err;
	u32 i;

	memset(&cmd, 0, sizeof(cmd));
	page_list = gve_alloc_dma_mem(&page_list_dma_mem, size);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		page_list[i] = cpu_to_be64(qpl->page_buses[i]);

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = cpu_to_be32(qpl->id),
		.num_pages = cpu_to_be32(num_entries),
		.page_address_list_addr = cpu_to_be64(page_list_dma_mem.pa),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	gve_free_dma_mem(&page_list_dma_mem);
	return err;
}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = cpu_to_be32(page_list_id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
		.parameter_value = cpu_to_be64(mtu),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
	cmd.report_stats = (struct gve_adminq_report_stats) {
		.stats_report_len = cpu_to_be64(stats_report_len),
		.stats_report_addr = cpu_to_be64(stats_report_addr),
		.interval = cpu_to_be64(interval),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

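/* Ask the device to write the current link speed into a small DMA region,
 * then cache the value in priv->link_speed.
 */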
int gve_adminq_report_link_speed(struct gve_priv *priv)
{
	struct gve_dma_mem link_speed_region_dma_mem;
	union gve_adminq_command gvnic_cmd;
	u64 *link_speed_region;
	int err;

	link_speed_region = gve_alloc_dma_mem(&link_speed_region_dma_mem,
					      sizeof(*link_speed_region));

	if (!link_speed_region)
		return -ENOMEM;

	memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
	gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
	gvnic_cmd.report_link_speed.link_speed_address =
		cpu_to_be64(link_speed_region_dma_mem.pa);

	err = gve_adminq_execute_cmd(priv, &gvnic_cmd);

	priv->link_speed = be64_to_cpu(*link_speed_region);
	gve_free_dma_mem(&link_speed_region_dma_mem);
	return err;
}

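/* Fetch the DQO packet type map from the device and copy the per-ptype
 * L3/L4 types into the caller's lookup table.
 */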
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut)
{
	struct gve_dma_mem ptype_map_dma_mem;
	struct gve_ptype_map *ptype_map;
	union gve_adminq_command cmd;
	int err = 0;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	ptype_map = gve_alloc_dma_mem(&ptype_map_dma_mem, sizeof(*ptype_map));
	if (!ptype_map)
		return -ENOMEM;

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
	cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
		.ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
		.ptype_map_addr = cpu_to_be64(ptype_map_dma_mem.pa),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto err;

	/* Populate ptype_lut. */
	for (i = 0; i < GVE_NUM_PTYPES; i++) {
		ptype_lut->ptypes[i].l3_type =
			ptype_map->ptypes[i].l3_type;
		ptype_lut->ptypes[i].l4_type =
			ptype_map->ptypes[i].l4_type;
	}
err:
	gve_free_dma_mem(&ptype_map_dma_mem);
	return err;
}

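/* Program the RSS hash key and indirection table. Both are staged in
 * temporary DMA buffers (the indirection entries converted to big endian)
 * that are released once the CONFIGURE_RSS command completes.
 */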
int gve_adminq_configure_rss(struct gve_priv *priv,
			     struct gve_rss_config *rss_config)
{
	struct gve_dma_mem indirection_table_dma_mem;
	struct gve_dma_mem rss_key_dma_mem;
	union gve_adminq_command cmd;
	__be32 *indir = NULL;
	u8 *key = NULL;
	int err = 0;
	int i;

	if (!rss_config->indir_size || !rss_config->key_size)
		return -EINVAL;

	indir = gve_alloc_dma_mem(&indirection_table_dma_mem,
				  rss_config->indir_size *
					sizeof(*rss_config->indir));
	if (!indir) {
		err = -ENOMEM;
		goto out;
	}
	for (i = 0; i < rss_config->indir_size; i++)
		indir[i] = cpu_to_be32(rss_config->indir[i]);

	key = gve_alloc_dma_mem(&rss_key_dma_mem,
				rss_config->key_size *
					sizeof(*rss_config->key));
	if (!key) {
		err = -ENOMEM;
		goto out;
	}
	memcpy(key, rss_config->key, rss_config->key_size);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_RSS);
	cmd.configure_rss = (struct gve_adminq_configure_rss) {
		.hash_types = cpu_to_be16(rss_config->hash_types),
		.halg = rss_config->alg,
		.hkey_len = cpu_to_be16(rss_config->key_size),
		.indir_len = cpu_to_be16(rss_config->indir_size),
		.hkey_addr = cpu_to_be64(rss_key_dma_mem.pa),
		.indir_addr = cpu_to_be64(indirection_table_dma_mem.pa),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);

out:
	if (indir)
		gve_free_dma_mem(&indirection_table_dma_mem);
	if (key)
		gve_free_dma_mem(&rss_key_dma_mem);
	return err;
}
1054