1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2 *
3 * Copyright 2008-2016 Freescale Semiconductor Inc.
4 * Copyright 2016,2019 NXP
5 */
6
7 #ifndef __RTA_SEC_RUN_TIME_ASM_H__
8 #define __RTA_SEC_RUN_TIME_ASM_H__
9
10 #include "desc.h"
11
12 /* hw/compat.h is not delivered in kernel */
13 #ifndef __KERNEL__
14 #include "compat.h"
15 #endif
16
17 /**
18 * enum rta_sec_era - SEC HW block revisions supported by the RTA library
19 * @RTA_SEC_ERA_1: SEC Era 1
20 * @RTA_SEC_ERA_2: SEC Era 2
21 * @RTA_SEC_ERA_3: SEC Era 3
22 * @RTA_SEC_ERA_4: SEC Era 4
23 * @RTA_SEC_ERA_5: SEC Era 5
24 * @RTA_SEC_ERA_6: SEC Era 6
25 * @RTA_SEC_ERA_7: SEC Era 7
26 * @RTA_SEC_ERA_8: SEC Era 8
27 * @MAX_SEC_ERA: maximum SEC HW block revision supported by RTA library
28 */
29 enum rta_sec_era {
30 RTA_SEC_ERA_1,
31 RTA_SEC_ERA_2,
32 RTA_SEC_ERA_3,
33 RTA_SEC_ERA_4,
34 RTA_SEC_ERA_5,
35 RTA_SEC_ERA_6,
36 RTA_SEC_ERA_7,
37 RTA_SEC_ERA_8,
38 RTA_SEC_ERA_9,
39 RTA_SEC_ERA_10,
40 MAX_SEC_ERA = RTA_SEC_ERA_10
41 };
42
43 /**
44 * DEFAULT_SEC_ERA - the default value for the SEC era in case the user provides
45 * an unsupported value.
46 */
47 #define DEFAULT_SEC_ERA MAX_SEC_ERA
48
49 /**
50 * USER_SEC_ERA - translates the SEC Era from internal to user representation.
51 * @sec_era: SEC Era in internal (library) representation
52 */
53 #define USER_SEC_ERA(sec_era) (sec_era + 1)
54
55 /**
56 * INTL_SEC_ERA - translates the SEC Era from user representation to internal.
57 * @sec_era: SEC Era in user representation
58 */
59 #define INTL_SEC_ERA(sec_era) (sec_era - 1)
60
61 /**
62 * enum rta_jump_type - Types of action taken by JUMP command
63 * @LOCAL_JUMP: conditional jump to an offset within the descriptor buffer
64 * @FAR_JUMP: conditional jump to a location outside the descriptor buffer,
65 * indicated by the POINTER field after the JUMP command.
66 * @HALT: conditional halt - stop the execution of the current descriptor and
67 * writes PKHA / Math condition bits as status / error code.
68 * @HALT_STATUS: conditional halt with user-specified status - stop the
69 * execution of the current descriptor and writes the value of
70 * "LOCAL OFFSET" JUMP field as status / error code.
71 * @GOSUB: conditional subroutine call - similar to @LOCAL_JUMP, but also saves
72 * return address in the Return Address register; subroutine calls
73 * cannot be nested.
74 * @RETURN: conditional subroutine return - similar to @LOCAL_JUMP, but the
75 * offset is taken from the Return Address register.
76 * @LOCAL_JUMP_INC: similar to @LOCAL_JUMP, but increment the register specified
77 * in "SRC_DST" JUMP field before evaluating the jump
78 * condition.
79 * @LOCAL_JUMP_DEC: similar to @LOCAL_JUMP, but decrement the register specified
80 * in "SRC_DST" JUMP field before evaluating the jump
81 * condition.
82 */
83 enum rta_jump_type {
84 LOCAL_JUMP,
85 FAR_JUMP,
86 HALT,
87 HALT_STATUS,
88 GOSUB,
89 RETURN,
90 LOCAL_JUMP_INC,
91 LOCAL_JUMP_DEC
92 };
93
94 /**
95 * enum rta_jump_cond - How test conditions are evaluated by JUMP command
96 * @ALL_TRUE: perform action if ALL selected conditions are true
97 * @ALL_FALSE: perform action if ALL selected conditions are false
98 * @ANY_TRUE: perform action if ANY of the selected conditions is true
99 * @ANY_FALSE: perform action if ANY of the selected conditions is false
100 */
101 enum rta_jump_cond {
102 ALL_TRUE,
103 ALL_FALSE,
104 ANY_TRUE,
105 ANY_FALSE
106 };
107
108 /**
109 * enum rta_share_type - Types of sharing for JOB_HDR and SHR_HDR commands
110 * @SHR_NEVER: nothing is shared; descriptors can execute in parallel (i.e. no
111 * dependencies are allowed between them).
112 * @SHR_WAIT: shared descriptor and keys are shared once the descriptor sets
113 * "OK to share" in DECO Control Register (DCTRL).
114 * @SHR_SERIAL: shared descriptor and keys are shared once the descriptor has
115 * completed.
116 * @SHR_ALWAYS: shared descriptor is shared anytime after the descriptor is
117 * loaded.
118 * @SHR_DEFER: valid only for JOB_HDR; sharing type is the one specified
119 * in the shared descriptor associated with the job descriptor.
120 */
121 enum rta_share_type {
122 SHR_NEVER,
123 SHR_WAIT,
124 SHR_SERIAL,
125 SHR_ALWAYS,
126 SHR_DEFER
127 };
128
129 /**
130 * enum rta_data_type - Indicates how is the data provided and how to include it
131 * in the descriptor.
132 * @RTA_DATA_PTR: Data is in memory and accessed by reference; data address is a
133 * physical (bus) address.
134 * @RTA_DATA_IMM: Data is inlined in descriptor and accessed as immediate data;
135 * data address is a virtual address.
136 * @RTA_DATA_IMM_DMA: (AIOP only) Data is inlined in descriptor and accessed as
137 * immediate data; data address is a physical (bus) address
138 * in external memory and CDMA is programmed to transfer the
139 * data into descriptor buffer being built in Workspace Area.
140 */
141 enum rta_data_type {
142 RTA_DATA_PTR = 1,
143 RTA_DATA_IMM,
144 RTA_DATA_IMM_DMA
145 };
146
147 /* Registers definitions */
148 enum rta_regs {
149 /* CCB Registers */
150 CONTEXT1 = 1,
151 CONTEXT2,
152 KEY1,
153 KEY2,
154 KEY1SZ,
155 KEY2SZ,
156 ICV1SZ,
157 ICV2SZ,
158 DATA1SZ,
159 DATA2SZ,
160 ALTDS1,
161 IV1SZ,
162 AAD1SZ,
163 MODE1,
164 MODE2,
165 CCTRL,
166 DCTRL,
167 ICTRL,
168 CLRW,
169 CSTAT,
170 IFIFO,
171 NFIFO,
172 OFIFO,
173 PKASZ,
174 PKBSZ,
175 PKNSZ,
176 PKESZ,
177 /* DECO Registers */
178 MATH0,
179 MATH1,
180 MATH2,
181 MATH3,
182 DESCBUF,
183 JOBDESCBUF,
184 SHAREDESCBUF,
185 DPOVRD,
186 DJQDA,
187 DSTAT,
188 DPID,
189 DJQCTRL,
190 ALTSOURCE,
191 SEQINSZ,
192 SEQOUTSZ,
193 VSEQINSZ,
194 VSEQOUTSZ,
195 /* PKHA Registers */
196 PKA,
197 PKN,
198 PKA0,
199 PKA1,
200 PKA2,
201 PKA3,
202 PKB,
203 PKB0,
204 PKB1,
205 PKB2,
206 PKB3,
207 PKE,
208 /* Pseudo registers */
209 AB1,
210 AB2,
211 ABD,
212 IFIFOABD,
213 IFIFOAB1,
214 IFIFOAB2,
215 AFHA_SBOX,
216 MDHA_SPLIT_KEY,
217 JOBSRC,
218 ZERO,
219 ONE,
220 AAD1,
221 IV1,
222 IV2,
223 MSG1,
224 MSG2,
225 MSG,
226 MSG_CKSUM,
227 MSGOUTSNOOP,
228 MSGINSNOOP,
229 ICV1,
230 ICV2,
231 SKIP,
232 NONE,
233 RNGOFIFO,
234 RNG,
235 IDFNS,
236 ODFNS,
237 NFIFOSZ,
238 SZ,
239 PAD,
240 SAD1,
241 AAD2,
242 BIT_DATA,
243 NFIFO_SZL,
244 NFIFO_SZM,
245 NFIFO_L,
246 NFIFO_M,
247 SZL,
248 SZM,
249 JOBDESCBUF_EFF,
250 SHAREDESCBUF_EFF,
251 METADATA,
252 GTR,
253 STR,
254 OFIFO_SYNC,
255 MSGOUTSNOOP_ALT
256 };
257
258 /* Command flags */
259 #define FLUSH1 BIT(0)
260 #define LAST1 BIT(1)
261 #define LAST2 BIT(2)
262 #define IMMED BIT(3)
263 #define SGF BIT(4)
264 #define VLF BIT(5)
265 #define EXT BIT(6)
266 #define CONT BIT(7)
267 #define SEQ BIT(8)
268 #define AIDF BIT(9)
269 #define FLUSH2 BIT(10)
270 #define CLASS1 BIT(11)
271 #define CLASS2 BIT(12)
272 #define BOTH BIT(13)
273
274 /**
275 * DCOPY - (AIOP only) command param is pointer to external memory
276 *
277 * CDMA must be used to transfer the key via DMA into Workspace Area.
278 * Valid only in combination with IMMED flag.
279 */
280 #define DCOPY BIT(30)
281
#define COPY		BIT(31) /* command param is pointer (not immediate);
				 * valid only in combination with IMMED
				 */

#define __COPY_MASK	(COPY | DCOPY)
287
288 /* SEQ IN/OUT PTR Command specific flags */
289 #define RBS BIT(16)
290 #define INL BIT(17)
291 #define PRE BIT(18)
292 #define RTO BIT(19)
293 #define RJD BIT(20)
294 #define SOP BIT(21)
295 #define RST BIT(22)
296 #define EWS BIT(23)
297
298 #define ENC BIT(14) /* Encrypted Key */
299 #define EKT BIT(15) /* AES CCM Encryption (default is
300 * AES ECB Encryption)
301 */
302 #define TK BIT(16) /* Trusted Descriptor Key (default is
303 * Job Descriptor Key)
304 */
305 #define NWB BIT(17) /* No Write Back Key */
306 #define PTS BIT(18) /* Plaintext Store */
307
308 /* HEADER Command specific flags */
309 #define RIF BIT(16)
310 #define DNR BIT(17)
311 #define CIF BIT(18)
312 #define PD BIT(19)
313 #define RSMS BIT(20)
314 #define TD BIT(21)
315 #define MTD BIT(22)
316 #define REO BIT(23)
317 #define SHR BIT(24)
318 #define SC BIT(25)
319 /* Extended HEADER specific flags */
320 #define DSV BIT(7)
321 #define DSEL_MASK 0x00000007 /* DECO Select */
322 #define FTD BIT(8)
323
324 /* JUMP Command specific flags */
325 #define NIFP BIT(20)
326 #define NIP BIT(21)
327 #define NOP BIT(22)
328 #define NCP BIT(23)
329 #define CALM BIT(24)
330
331 #define MATH_Z BIT(25)
332 #define MATH_N BIT(26)
333 #define MATH_NV BIT(27)
334 #define MATH_C BIT(28)
335 #define PK_0 BIT(29)
336 #define PK_GCD_1 BIT(30)
337 #define PK_PRIME BIT(31)
338 #define SELF BIT(0)
339 #define SHRD BIT(1)
340 #define JQP BIT(2)
341
342 /* NFIFOADD specific flags */
343 #define PAD_ZERO BIT(16)
344 #define PAD_NONZERO BIT(17)
345 #define PAD_INCREMENT BIT(18)
346 #define PAD_RANDOM BIT(19)
347 #define PAD_ZERO_N1 BIT(20)
348 #define PAD_NONZERO_0 BIT(21)
349 #define PAD_N1 BIT(23)
350 #define PAD_NONZERO_N BIT(24)
351 #define OC BIT(25)
352 #define BM BIT(26)
353 #define PR BIT(27)
354 #define PS BIT(28)
355 #define BP BIT(29)
356
357 /* MOVE Command specific flags */
358 #define WAITCOMP BIT(16)
359 #define SIZE_WORD BIT(17)
360 #define SIZE_BYTE BIT(18)
361 #define SIZE_DWORD BIT(19)
362
363 /* MATH command specific flags */
364 #define IFB MATH_IFB
365 #define NFU MATH_NFU
366 #define STL MATH_STL
367 #define SSEL MATH_SSEL
368 #define SWP MATH_SWP
369 #define IMMED2 BIT(31)
370
371 /**
372 * struct program - descriptor buffer management structure
373 * @current_pc: current offset in descriptor
374 * @current_instruction: current instruction in descriptor
375 * @first_error_pc: offset of the first error in descriptor
376 * @start_pc: start offset in descriptor buffer
377 * @buffer: buffer carrying descriptor
378 * @shrhdr: shared descriptor header
379 * @jobhdr: job descriptor header
380 * @ps: pointer fields size; if ps is true, pointers will be 36bits in
381 * length; if ps is false, pointers will be 32bits in length
382 * @bswap: if true, perform byte swap on a 4-byte boundary
383 */
384 struct program {
385 unsigned int current_pc;
386 unsigned int current_instruction;
387 unsigned int first_error_pc;
388 unsigned int start_pc;
389 uint32_t *buffer;
390 uint32_t *shrhdr;
391 uint32_t *jobhdr;
392 bool ps;
393 bool bswap;
394 };
395
396 static inline void
rta_program_cntxt_init(struct program * program,uint32_t * buffer,unsigned int offset)397 rta_program_cntxt_init(struct program *program,
398 uint32_t *buffer, unsigned int offset)
399 {
400 program->current_pc = 0;
401 program->current_instruction = 0;
402 program->first_error_pc = 0;
403 program->start_pc = offset;
404 program->buffer = buffer;
405 program->shrhdr = NULL;
406 program->jobhdr = NULL;
407 program->ps = false;
408 program->bswap = false;
409 }
410
411 static inline int
rta_program_finalize(struct program * program)412 rta_program_finalize(struct program *program)
413 {
414 /* Descriptor is usually not allowed to go beyond 64 words size */
415 if (program->current_pc > MAX_CAAM_DESCSIZE)
416 pr_debug("Descriptor Size exceeded max limit of 64 words");
417
418 /* Descriptor is erroneous */
419 if (program->first_error_pc) {
420 pr_err("Descriptor creation error\n");
421 return -EINVAL;
422 }
423
424 /* Update descriptor length in shared and job descriptor headers */
425 if (program->shrhdr != NULL)
426 *program->shrhdr |= program->bswap ?
427 swab32(program->current_pc) :
428 program->current_pc;
429 else if (program->jobhdr != NULL)
430 *program->jobhdr |= program->bswap ?
431 swab32(program->current_pc) :
432 program->current_pc;
433
434 return (int)program->current_pc;
435 }
436
/*
 * Enable 36-bit pointer mode for this program (see struct program @ps).
 * Returns the current descriptor offset, in words.
 */
static inline unsigned int
rta_program_set_36bit_addr(struct program *program)
{
	program->ps = true;
	return program->current_pc;
}
443
/*
 * Request byte-swapping of every 32-bit word written into the descriptor
 * buffer. Returns the current descriptor offset, in words.
 */
static inline unsigned int
rta_program_set_bswap(struct program *program)
{
	program->bswap = true;
	return program->current_pc;
}
450
451 static inline void
__rta_out32(struct program * program,uint32_t val)452 __rta_out32(struct program *program, uint32_t val)
453 {
454 program->buffer[program->current_pc] = program->bswap ?
455 swab32(val) : val;
456 program->current_pc++;
457 }
458
459 static inline void
__rta_out_be32(struct program * program,uint32_t val)460 __rta_out_be32(struct program *program, uint32_t val)
461 {
462 program->buffer[program->current_pc] = cpu_to_be32(val);
463 program->current_pc++;
464 }
465
466 static inline void
__rta_out_le32(struct program * program,uint32_t val)467 __rta_out_le32(struct program *program, uint32_t val)
468 {
469 program->buffer[program->current_pc] = cpu_to_le32(val);
470 program->current_pc++;
471 }
472
/*
 * Emit a 64-bit value: both 32-bit halves when is_ext is set, otherwise
 * only the lower 32 bits. Word order depends on host endianness and on the
 * program's bswap setting, see below.
 */
static inline void
__rta_out64(struct program *program, bool is_ext, uint64_t val)
{
	if (is_ext) {
		/*
		 * Since we are guaranteed only a 4-byte alignment in the
		 * descriptor buffer, we have to do 2 x 32-bit (word) writes.
		 * For the order of the 2 words to be correct, we need to
		 * take into account the endianness of the CPU.
		 */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		__rta_out32(program, program->bswap ? lower_32_bits(val) :
						      upper_32_bits(val));

		__rta_out32(program, program->bswap ? upper_32_bits(val) :
						      lower_32_bits(val));
#else
		__rta_out32(program, program->bswap ? upper_32_bits(val) :
						      lower_32_bits(val));

		__rta_out32(program, program->bswap ? lower_32_bits(val) :
						      upper_32_bits(val));
#endif
	} else {
		__rta_out32(program, lower_32_bits(val));
	}
}
500
/*
 * Emit a value as big-endian word(s): the full 64 bits when is_ext is set
 * (upper word first), otherwise only the lower 32 bits.
 */
static inline void __rta_out_be64(struct program *program, bool is_ext,
				  uint64_t val)
{
	if (is_ext)
		__rta_out_be32(program, upper_32_bits(val));

	__rta_out_be32(program, lower_32_bits(val));
}
511
/*
 * Emit a value as little-endian word(s): the full 64 bits when is_ext is
 * set (lower word first), otherwise only the lower 32 bits.
 */
static inline void __rta_out_le64(struct program *program, bool is_ext,
				  uint64_t val)
{
	__rta_out_le32(program, lower_32_bits(val));

	if (is_ext)
		__rta_out_le32(program, upper_32_bits(val));
}
522
523 static inline unsigned int
rta_word(struct program * program,uint32_t val)524 rta_word(struct program *program, uint32_t val)
525 {
526 unsigned int start_pc = program->current_pc;
527
528 __rta_out32(program, val);
529
530 return start_pc;
531 }
532
533 static inline unsigned int
rta_dword(struct program * program,uint64_t val)534 rta_dword(struct program *program, uint64_t val)
535 {
536 unsigned int start_pc = program->current_pc;
537
538 __rta_out64(program, true, val);
539
540 return start_pc;
541 }
542
543 static inline uint32_t
inline_flags(enum rta_data_type data_type)544 inline_flags(enum rta_data_type data_type)
545 {
546 switch (data_type) {
547 case RTA_DATA_PTR:
548 return 0;
549 case RTA_DATA_IMM:
550 return IMMED | COPY;
551 case RTA_DATA_IMM_DMA:
552 return IMMED | DCOPY;
553 default:
554 /* warn and default to RTA_DATA_PTR */
555 pr_warn("RTA: defaulting to RTA_DATA_PTR parameter type\n");
556 return 0;
557 }
558 }
559
560 static inline unsigned int
rta_copy_data(struct program * program,uint8_t * data,unsigned int length)561 rta_copy_data(struct program *program, uint8_t *data, unsigned int length)
562 {
563 unsigned int i;
564 unsigned int start_pc = program->current_pc;
565 uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
566
567 for (i = 0; i < length; i++)
568 *tmp++ = data[i];
569 program->current_pc += (length + 3) / 4;
570
571 return start_pc;
572 }
573
#if defined(__EWL__) && defined(AIOP)
/* AIOP build: transfer the data into the workspace destination via CDMA. */
static inline void
__rta_dma_data(void *ws_dst, uint64_t ext_address, uint16_t size)
{ cdma_read(ws_dst, ext_address, size); }
#else
/* Non-AIOP build: DCOPY unsupported; warn and leave ws_dst untouched. */
static inline void
__rta_dma_data(void *ws_dst __maybe_unused,
	       uint64_t ext_address __maybe_unused,
	       uint16_t size __maybe_unused)
{ pr_warn("RTA: DCOPY not supported, DMA will be skipped\n"); }
#endif /* defined(__EWL__) && defined(AIOP) */
585
/*
 * Include data in the descriptor according to the copy_data flags:
 * - no flag set: 'data' is the value itself; emit it as one word, or as a
 *   double word when length exceeds 4 bytes;
 * - COPY: 'data' is a virtual address; copy 'length' bytes into the
 *   descriptor buffer byte by byte;
 * - DCOPY: 'data' is a physical address; DMA 'length' bytes into the
 *   descriptor buffer (effective on AIOP builds only, see __rta_dma_data).
 * In the COPY/DCOPY cases current_pc advances by 'length' rounded up to
 * whole words.
 */
static inline void
__rta_inline_data(struct program *program, uint64_t data,
		  uint32_t copy_data, uint32_t length)
{
	if (!copy_data) {
		__rta_out64(program, length > 4, data);
	} else if (copy_data & COPY) {
		uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
		uint32_t i;

		for (i = 0; i < length; i++)
			*tmp++ = ((uint8_t *)(uintptr_t)data)[i];
		program->current_pc += ((length + 3) / 4);
	} else if (copy_data & DCOPY) {
		__rta_dma_data(&program->buffer[program->current_pc], data,
			       (uint16_t)length);
		program->current_pc += ((length + 3) / 4);
	}
}
605
606 static inline unsigned int
rta_desc_len(uint32_t * buffer)607 rta_desc_len(uint32_t *buffer)
608 {
609 if ((*buffer & CMD_MASK) == CMD_DESC_HDR) {
610 return *buffer & HDR_DESCLEN_MASK;
611 } else {
612 if (rta_sec_era >= RTA_SEC_ERA_10)
613 return *buffer & HDR_DESCLEN_SHR_MASK_ERA10;
614 else
615 return *buffer & HDR_DESCLEN_SHR_MASK;
616 }
617 }
618
/* Length, in bytes, of the descriptor whose header word is at 'buffer'. */
static inline unsigned int
rta_desc_bytes(uint32_t *buffer)
{
	return (unsigned int)(rta_desc_len(buffer) * CAAM_CMD_SZ);
}
624
625 /**
626 * split_key_len - Compute MDHA split key length for a given algorithm
627 * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* or
628 * OP_PCLID_DKP_* - MD5, SHA1, SHA224, SHA256, SHA384, SHA512.
629 *
630 * Return: MDHA split key length
631 */
632 static inline uint32_t
split_key_len(uint32_t hash)633 split_key_len(uint32_t hash)
634 {
635 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
636 static const uint8_t mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
637 uint32_t idx;
638
639 idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
640
641 return (uint32_t)(mdpadlen[idx] * 2);
642 }
643
644 /**
645 * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
646 * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
647 * SHA224, SHA384, SHA512.
648 *
649 * Return: MDHA split key pad length
650 */
651 static inline uint32_t
split_key_pad_len(uint32_t hash)652 split_key_pad_len(uint32_t hash)
653 {
654 return ALIGN(split_key_len(hash), 16);
655 }
656
/*
 * Return a label for the current position in the descriptor: the current
 * word offset plus the descriptor's start offset.
 */
static inline unsigned int
rta_set_label(struct program *program)
{
	return program->current_pc + program->start_pc;
}
662
663 static inline int
rta_patch_move(struct program * program,int line,unsigned int new_ref)664 rta_patch_move(struct program *program, int line, unsigned int new_ref)
665 {
666 uint32_t opcode;
667 bool bswap = program->bswap;
668
669 if (line < 0)
670 return -EINVAL;
671
672 opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
673
674 opcode &= (uint32_t)~MOVE_OFFSET_MASK;
675 opcode |= (new_ref << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
676 program->buffer[line] = bswap ? swab32(opcode) : opcode;
677
678 return 0;
679 }
680
681 static inline int
rta_patch_jmp(struct program * program,int line,unsigned int new_ref)682 rta_patch_jmp(struct program *program, int line, unsigned int new_ref)
683 {
684 uint32_t opcode;
685 bool bswap = program->bswap;
686
687 if (line < 0)
688 return -EINVAL;
689
690 opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
691
692 opcode &= (uint32_t)~JUMP_OFFSET_MASK;
693 opcode |= (new_ref - (line + program->start_pc)) & JUMP_OFFSET_MASK;
694 program->buffer[line] = bswap ? swab32(opcode) : opcode;
695
696 return 0;
697 }
698
699 static inline int
rta_patch_header(struct program * program,int line,unsigned int new_ref)700 rta_patch_header(struct program *program, int line, unsigned int new_ref)
701 {
702 uint32_t opcode;
703 bool bswap = program->bswap;
704
705 if (line < 0)
706 return -EINVAL;
707
708 opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
709 if (rta_sec_era >= RTA_SEC_ERA_10) {
710 opcode &= (uint32_t)~HDR_START_IDX_MASK_ERA10;
711 opcode |= (new_ref << HDR_START_IDX_SHIFT) &
712 HDR_START_IDX_MASK_ERA10;
713 } else {
714 opcode &= (uint32_t)~HDR_START_IDX_MASK;
715 opcode |= (new_ref << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;
716 }
717
718 program->buffer[line] = bswap ? swab32(opcode) : opcode;
719
720 return 0;
721 }
722
723 static inline int
rta_patch_load(struct program * program,int line,unsigned int new_ref)724 rta_patch_load(struct program *program, int line, unsigned int new_ref)
725 {
726 uint32_t opcode;
727 bool bswap = program->bswap;
728
729 if (line < 0)
730 return -EINVAL;
731
732 opcode = (bswap ? swab32(program->buffer[line]) :
733 program->buffer[line]) & (uint32_t)~LDST_OFFSET_MASK;
734
735 if (opcode & (LDST_SRCDST_WORD_DESCBUF | LDST_CLASS_DECO))
736 opcode |= (new_ref << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
737 else
738 opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
739 LDST_OFFSET_MASK;
740
741 program->buffer[line] = bswap ? swab32(opcode) : opcode;
742
743 return 0;
744 }
745
746 static inline int
rta_patch_store(struct program * program,int line,unsigned int new_ref)747 rta_patch_store(struct program *program, int line, unsigned int new_ref)
748 {
749 uint32_t opcode;
750 bool bswap = program->bswap;
751
752 if (line < 0)
753 return -EINVAL;
754
755 opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
756
757 opcode &= (uint32_t)~LDST_OFFSET_MASK;
758
759 switch (opcode & LDST_SRCDST_MASK) {
760 case LDST_SRCDST_WORD_DESCBUF:
761 case LDST_SRCDST_WORD_DESCBUF_JOB:
762 case LDST_SRCDST_WORD_DESCBUF_SHARED:
763 case LDST_SRCDST_WORD_DESCBUF_JOB_WE:
764 case LDST_SRCDST_WORD_DESCBUF_SHARED_WE:
765 opcode |= ((new_ref) << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
766 break;
767 default:
768 opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
769 LDST_OFFSET_MASK;
770 }
771
772 program->buffer[line] = bswap ? swab32(opcode) : opcode;
773
774 return 0;
775 }
776
777 static inline int
rta_patch_raw(struct program * program,int line,unsigned int mask,unsigned int new_val)778 rta_patch_raw(struct program *program, int line, unsigned int mask,
779 unsigned int new_val)
780 {
781 uint32_t opcode;
782 bool bswap = program->bswap;
783
784 if (line < 0)
785 return -EINVAL;
786
787 opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
788
789 opcode &= (uint32_t)~mask;
790 opcode |= new_val & mask;
791 program->buffer[line] = bswap ? swab32(opcode) : opcode;
792
793 return 0;
794 }
795
/*
 * Look up 'name' in a two-column map table; on a hit store the mapped
 * opcode value in *val and return 0, otherwise return -EINVAL and leave
 * *val untouched.
 */
static inline int
__rta_map_opcode(uint32_t name, const uint32_t (*map_table)[2],
		 unsigned int num_of_entries, uint32_t *val)
{
	unsigned int idx;

	for (idx = 0; idx < num_of_entries; idx++) {
		if (map_table[idx][0] != name)
			continue;

		*val = map_table[idx][1];
		return 0;
	}

	return -EINVAL;
}
810
/*
 * For every row of the flags table whose flag bits intersect 'flags', OR
 * the corresponding opcode bits into *opcode.
 */
static inline void
__rta_map_flags(uint32_t flags, const uint32_t (*flags_table)[2],
		unsigned int num_of_entries, uint32_t *opcode)
{
	unsigned int idx;

	for (idx = 0; idx < num_of_entries; idx++)
		if (flags & flags_table[idx][0])
			*opcode |= flags_table[idx][1];
}
822
823 #endif /* __RTA_SEC_RUN_TIME_ASM_H__ */
824