/*
 * Argon2 reference source code package - reference C implementations
 *
 * Copyright 2015
 * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
 *
 * You may use this work under the terms of a Creative Commons CC0 1.0
 * License/Waiver or the Apache Public License 2.0, at your option. The terms of
 * these licenses can be found at:
 *
 * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
 * - Apache 2.0        : http://www.apache.org/licenses/LICENSE-2.0
 *
 * You should have received a copy of both of these licenses along with this
 * software. If not, they may be obtained at the above URLs.
 */

/* For memory wiping */
#ifdef _MSC_VER
#include <windows.h>
#include <winbase.h> /* For SecureZeroMemory */
#endif
#if defined __STDC_LIB_EXT1__
#define __STDC_WANT_LIB_EXT1__ 1
#endif
#define VC_GE_2005(version) (version >= 1400)

/* for explicit_bzero() on glibc */
#define _DEFAULT_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "core.h"
#include "thread.h"
#include "blake2/blake2.h"
#include "blake2/blake2-impl.h"

#ifdef GENKAT
#include "genkat.h"
#endif

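/*
 * NOT_OPTIMIZED (defined below) marks secure_wipe_memory() so the compiler
 * does not optimize that function; this keeps the wiping memset from being
 * removed as a dead store.
 */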
#if defined(__clang__)
#if __has_attribute(optnone)
#define NOT_OPTIMIZED __attribute__((optnone))
#endif
#elif defined(__GNUC__)
#define GCC_VERSION                                                            \
    (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40400
#define NOT_OPTIMIZED __attribute__((optimize("O0")))
#endif
#endif
#ifndef NOT_OPTIMIZED
#define NOT_OPTIMIZED
#endif

/***************Instance and Position constructors**********/
void init_block_value(block *b, uint8_t in) { memset(b->v, in, sizeof(b->v)); }

void copy_block(block *dst, const block *src) {
    memcpy(dst->v, src->v, sizeof(uint64_t) * ARGON2_QWORDS_IN_BLOCK);
}

void xor_block(block *dst, const block *src) {
    int i;
    for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
        dst->v[i] ^= src->v[i];
    }
}

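/*
 * load_block()/store_block() convert a 1 KiB block between its in-memory
 * uint64_t representation and the little-endian byte order used for hashing,
 * via the load64()/store64() helpers from blake2-impl.h.
 */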
static void load_block(block *dst, const void *input) {
    unsigned i;
    for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
        dst->v[i] = load64((const uint8_t *)input + i * sizeof(dst->v[i]));
    }
}

static void store_block(void *output, const block *src) {
    unsigned i;
    for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
        store64((uint8_t *)output + i * sizeof(src->v[i]), src->v[i]);
    }
}

/***************Memory functions*****************/

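/*
 * Allocates num*size bytes for the memory matrix, either through the
 * caller-supplied allocate_cbk or through malloc(). Fails cleanly on
 * multiplication overflow or allocation failure.
 */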
int allocate_memory(const argon2_context *context, uint8_t **memory,
                    size_t num, size_t size) {
    size_t memory_size = num * size;
    if (memory == NULL) {
        return ARGON2_MEMORY_ALLOCATION_ERROR;
    }

    /* 1. Check for multiplication overflow */
    if (size != 0 && memory_size / size != num) {
        return ARGON2_MEMORY_ALLOCATION_ERROR;
    }

    /* 2. Try to allocate with appropriate allocator */
    if (context->allocate_cbk) {
        (context->allocate_cbk)(memory, memory_size);
    } else {
        *memory = malloc(memory_size);
    }

    if (*memory == NULL) {
        return ARGON2_MEMORY_ALLOCATION_ERROR;
    }

    return ARGON2_OK;
}

void free_memory(const argon2_context *context, uint8_t *memory,
                 size_t num, size_t size) {
    size_t memory_size = num * size;
    clear_internal_memory(memory, memory_size);
    if (context->free_cbk) {
        (context->free_cbk)(memory, memory_size);
    } else {
        free(memory);
    }
}

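/*
 * Pick a memory-wiping primitive that the compiler is not allowed to elide:
 * explicit_bzero() on OpenBSD and recent glibc, explicit_memset() on NetBSD.
 * Other platforms fall through to the fallbacks in secure_wipe_memory().
 */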
#if defined(__OpenBSD__)
#define HAVE_EXPLICIT_BZERO 1
#elif defined(__NetBSD__)
#define HAVE_EXPLICIT_MEMSET 1
#elif defined(__GLIBC__) && defined(__GLIBC_PREREQ)
#if __GLIBC_PREREQ(2,25)
#define HAVE_EXPLICIT_BZERO 1
#endif
#endif

void NOT_OPTIMIZED secure_wipe_memory(void *v, size_t n) {
#if defined(_MSC_VER) && VC_GE_2005(_MSC_VER)
    SecureZeroMemory(v, n);
#elif defined memset_s
    memset_s(v, n, 0, n);
#elif defined(HAVE_EXPLICIT_BZERO)
    explicit_bzero(v, n);
#elif defined(HAVE_EXPLICIT_MEMSET)
    explicit_memset(v, 0, n);
#else
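    /*
     * Last-resort fallback: calling memset through a volatile function
     * pointer keeps the compiler from proving the call has no observable
     * effect and deleting the wipe.
     */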
    static void *(*const volatile memset_sec)(void *, int, size_t) = &memset;
    memset_sec(v, 0, n);
#endif
}

/* Memory clear flag defaults to true. */
int FLAG_clear_internal_memory = 1;
void clear_internal_memory(void *v, size_t n) {
    if (FLAG_clear_internal_memory && v) {
        secure_wipe_memory(v, n);
    }
}

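/*
 * finalize(): XOR together the last block of every lane, hash the result with
 * blake2b_long() into context->out, then wipe the intermediates and release
 * the memory matrix.
 */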
void finalize(const argon2_context *context, argon2_instance_t *instance) {
    if (context != NULL && instance != NULL) {
        block blockhash;
        uint32_t l;

        copy_block(&blockhash, instance->memory + instance->lane_length - 1);

        /* XOR the last blocks */
        for (l = 1; l < instance->lanes; ++l) {
            uint32_t last_block_in_lane =
                l * instance->lane_length + (instance->lane_length - 1);
            xor_block(&blockhash, instance->memory + last_block_in_lane);
        }

        /* Hash the result */
        {
            uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
            store_block(blockhash_bytes, &blockhash);
            blake2b_long(context->out, context->outlen, blockhash_bytes,
                         ARGON2_BLOCK_SIZE);
            /* clear blockhash and blockhash_bytes */
            clear_internal_memory(blockhash.v, ARGON2_BLOCK_SIZE);
            clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
        }

#ifdef GENKAT
        print_tag(context->out, context->outlen);
#endif

        free_memory(context, (uint8_t *)instance->memory,
                    instance->memory_blocks, sizeof(block));
    }
}

uint32_t index_alpha(const argon2_instance_t *instance,
                     const argon2_position_t *position, uint32_t pseudo_rand,
                     int same_lane) {
    /*
     * Pass 0:
     *      This lane : all already finished segments plus already constructed
     * blocks in this segment
     *      Other lanes : all already finished segments
     * Pass 1+:
     *      This lane : (SYNC_POINTS - 1) last segments plus already constructed
     * blocks in this segment
     *      Other lanes : (SYNC_POINTS - 1) last segments
     */
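    /*
     * The 32-bit pseudo_rand is squared and rescaled below (step 1.2.4),
     * which biases the reference toward the most recently written blocks in
     * the reference area rather than picking uniformly.
     */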
    uint32_t reference_area_size;
    uint64_t relative_position;
    uint32_t start_position, absolute_position;

    if (0 == position->pass) {
        /* First pass */
        if (0 == position->slice) {
            /* First slice */
            reference_area_size =
                position->index - 1; /* all but the previous */
        } else {
            if (same_lane) {
                /* The same lane => add current segment */
                reference_area_size =
                    position->slice * instance->segment_length +
                    position->index - 1;
            } else {
                reference_area_size =
                    position->slice * instance->segment_length +
                    ((position->index == 0) ? (-1) : 0);
            }
        }
    } else {
        /* Second pass */
        if (same_lane) {
            reference_area_size = instance->lane_length -
                                  instance->segment_length + position->index -
                                  1;
        } else {
            reference_area_size = instance->lane_length -
                                  instance->segment_length +
                                  ((position->index == 0) ? (-1) : 0);
        }
    }

    /* 1.2.4. Mapping pseudo_rand to 0..<reference_area_size-1> and produce
     * relative position */
    relative_position = pseudo_rand;
    relative_position = relative_position * relative_position >> 32;
    relative_position = reference_area_size - 1 -
                        (reference_area_size * relative_position >> 32);

    /* 1.2.5 Computing starting position */
    start_position = 0;

    if (0 != position->pass) {
        start_position = (position->slice == ARGON2_SYNC_POINTS - 1)
                             ? 0
                             : (position->slice + 1) * instance->segment_length;
    }

    /* 1.2.6. Computing absolute position */
    absolute_position = (start_position + relative_position) %
                        instance->lane_length; /* absolute position */
    return absolute_position;
}

/* Single-threaded version for p=1 case */
static int fill_memory_blocks_st(argon2_instance_t *instance) {
    uint32_t r, s, l;

    for (r = 0; r < instance->passes; ++r) {
        for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
            for (l = 0; l < instance->lanes; ++l) {
                argon2_position_t position = {r, l, (uint8_t)s, 0};
                fill_segment(instance, position);
            }
        }
#ifdef GENKAT
        internal_kat(instance, r); /* Print all memory blocks */
#endif
    }
    return ARGON2_OK;
}

#if !defined(ARGON2_NO_THREADS)

#ifdef _WIN32
static unsigned __stdcall fill_segment_thr(void *thread_data)
#else
static void *fill_segment_thr(void *thread_data)
#endif
{
    argon2_thread_data *my_data = thread_data;
    fill_segment(my_data->instance_ptr, my_data->pos);
    argon2_thread_exit();
    return 0;
}

/* Multi-threaded version for p > 1 case */
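/*
 * Each (pass, slice) pair is processed with at most instance->threads worker
 * threads running over the lanes at a time; a new thread is only started
 * after the one that is instance->threads positions behind has been joined,
 * and the slice boundary acts as the synchronization point.
 */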
static int fill_memory_blocks_mt(argon2_instance_t *instance) {
    uint32_t r, s;
    argon2_thread_handle_t *thread = NULL;
    argon2_thread_data *thr_data = NULL;
    int rc = ARGON2_OK;

    /* 1. Allocating space for threads */
    thread = calloc(instance->lanes, sizeof(argon2_thread_handle_t));
    if (thread == NULL) {
        rc = ARGON2_MEMORY_ALLOCATION_ERROR;
        goto fail;
    }

    thr_data = calloc(instance->lanes, sizeof(argon2_thread_data));
    if (thr_data == NULL) {
        rc = ARGON2_MEMORY_ALLOCATION_ERROR;
        goto fail;
    }

    for (r = 0; r < instance->passes; ++r) {
        for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
            uint32_t l, ll;

            /* 2. Calling threads */
            for (l = 0; l < instance->lanes; ++l) {
                argon2_position_t position;

                /* 2.1 Join a thread if limit is exceeded */
                if (l >= instance->threads) {
                    if (argon2_thread_join(thread[l - instance->threads])) {
                        rc = ARGON2_THREAD_FAIL;
                        goto fail;
                    }
                }

                /* 2.2 Create thread */
                position.pass = r;
                position.lane = l;
                position.slice = (uint8_t)s;
                position.index = 0;
                thr_data[l].instance_ptr =
                    instance; /* preparing the thread input */
                memcpy(&(thr_data[l].pos), &position,
                       sizeof(argon2_position_t));
                if (argon2_thread_create(&thread[l], &fill_segment_thr,
                                         (void *)&thr_data[l])) {
                    /* Wait for already running threads */
                    for (ll = 0; ll < l; ++ll)
                        argon2_thread_join(thread[ll]);
                    rc = ARGON2_THREAD_FAIL;
                    goto fail;
                }

                /* fill_segment(instance, position); */
                /* Non-threaded equivalent of the lines above */
            }

            /* 3. Joining remaining threads */
            for (l = instance->lanes - instance->threads; l < instance->lanes;
                 ++l) {
                if (argon2_thread_join(thread[l])) {
                    rc = ARGON2_THREAD_FAIL;
                    goto fail;
                }
            }
        }

#ifdef GENKAT
        internal_kat(instance, r); /* Print all memory blocks */
#endif
    }

fail:
    if (thread != NULL) {
        free(thread);
    }
    if (thr_data != NULL) {
        free(thr_data);
    }
    return rc;
}

#endif /* ARGON2_NO_THREADS */

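/*
 * Dispatch: with a single thread (or when threading is compiled out) the
 * segments are filled in-process; otherwise the multi-threaded scheduler
 * above is used.
 */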
int fill_memory_blocks(argon2_instance_t *instance) {
    if (instance == NULL || instance->lanes == 0) {
        return ARGON2_INCORRECT_PARAMETER;
    }
#if defined(ARGON2_NO_THREADS)
    return fill_memory_blocks_st(instance);
#else
    return instance->threads == 1 ?
        fill_memory_blocks_st(instance) : fill_memory_blocks_mt(instance);
#endif
}

int validate_inputs(const argon2_context *context) {
    if (NULL == context) {
        return ARGON2_INCORRECT_PARAMETER;
    }

    if (NULL == context->out) {
        return ARGON2_OUTPUT_PTR_NULL;
    }

    /* Validate output length */
    if (ARGON2_MIN_OUTLEN > context->outlen) {
        return ARGON2_OUTPUT_TOO_SHORT;
    }

    if (ARGON2_MAX_OUTLEN < context->outlen) {
        return ARGON2_OUTPUT_TOO_LONG;
    }

    /* Validate password (required param) */
    if (NULL == context->pwd) {
        if (0 != context->pwdlen) {
            return ARGON2_PWD_PTR_MISMATCH;
        }
    }

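    /*
     * Note: the "x + 1 > y + 1" form here (and for the secret and associated
     * data below) is presumably used to keep the check well-formed when the
     * minimum constant is 0, where a plain "0 > unsigned" comparison would be
     * always false and draw tautological-comparison warnings.
     */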
    if (ARGON2_MIN_PWD_LENGTH + 1 > context->pwdlen + 1) {
        return ARGON2_PWD_TOO_SHORT;
    }

    if (ARGON2_MAX_PWD_LENGTH < context->pwdlen) {
        return ARGON2_PWD_TOO_LONG;
    }

    /* Validate salt (required param) */
    if (NULL == context->salt) {
        if (0 != context->saltlen) {
            return ARGON2_SALT_PTR_MISMATCH;
        }
    }

    if (ARGON2_MIN_SALT_LENGTH > context->saltlen) {
        return ARGON2_SALT_TOO_SHORT;
    }

    if (ARGON2_MAX_SALT_LENGTH < context->saltlen) {
        return ARGON2_SALT_TOO_LONG;
    }

    /* Validate secret (optional param) */
    if (NULL == context->secret) {
        if (0 != context->secretlen) {
            return ARGON2_SECRET_PTR_MISMATCH;
        }
    } else {
        if (ARGON2_MIN_SECRET + 1 > context->secretlen + 1) {
            return ARGON2_SECRET_TOO_SHORT;
        }
        if (ARGON2_MAX_SECRET < context->secretlen) {
            return ARGON2_SECRET_TOO_LONG;
        }
    }

    /* Validate associated data (optional param) */
    if (NULL == context->ad) {
        if (0 != context->adlen) {
            return ARGON2_AD_PTR_MISMATCH;
        }
    } else {
        if (ARGON2_MIN_AD_LENGTH + 1 > context->adlen + 1) {
            return ARGON2_AD_TOO_SHORT;
        }
        if (ARGON2_MAX_AD_LENGTH < context->adlen) {
            return ARGON2_AD_TOO_LONG;
        }
    }

    /* Validate memory cost */
    if (ARGON2_MIN_MEMORY > context->m_cost) {
        return ARGON2_MEMORY_TOO_LITTLE;
    }

    if (ARGON2_MAX_MEMORY - 1 < context->m_cost - 1) {
        return ARGON2_MEMORY_TOO_MUCH;
    }

    if (context->m_cost < 8 * context->lanes) {
        return ARGON2_MEMORY_TOO_LITTLE;
    }

    /* Validate time cost */
    if (ARGON2_MIN_TIME > context->t_cost) {
        return ARGON2_TIME_TOO_SMALL;
    }

    if (ARGON2_MAX_TIME < context->t_cost) {
        return ARGON2_TIME_TOO_LARGE;
    }

    /* Validate lanes */
    if (ARGON2_MIN_LANES > context->lanes) {
        return ARGON2_LANES_TOO_FEW;
    }

    if (ARGON2_MAX_LANES < context->lanes) {
        return ARGON2_LANES_TOO_MANY;
    }

    /* Validate threads */
    if (ARGON2_MIN_THREADS > context->threads) {
        return ARGON2_THREADS_TOO_FEW;
    }

    if (ARGON2_MAX_THREADS < context->threads) {
        return ARGON2_THREADS_TOO_MANY;
    }

    if (NULL != context->allocate_cbk && NULL == context->free_cbk) {
        return ARGON2_FREE_MEMORY_CBK_NULL;
    }

    if (NULL == context->allocate_cbk && NULL != context->free_cbk) {
        return ARGON2_ALLOCATE_MEMORY_CBK_NULL;
    }

    return ARGON2_OK;
}

void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
    uint32_t l;
    /* Make the first and second block in each lane as G(H0||0||i) or
       G(H0||1||i) */
    uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
    for (l = 0; l < instance->lanes; ++l) {

        store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 0);
        store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4, l);
        blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
                     ARGON2_PREHASH_SEED_LENGTH);
        load_block(&instance->memory[l * instance->lane_length + 0],
                   blockhash_bytes);

        store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 1);
        blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
                     ARGON2_PREHASH_SEED_LENGTH);
        load_block(&instance->memory[l * instance->lane_length + 1],
                   blockhash_bytes);
    }
    clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
}

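/*
 * initial_hash(): compute H0 by feeding all parameters and inputs to BLAKE2b
 * as 32-bit little-endian fields, in the order lanes, outlen, m_cost, t_cost,
 * version, type, then the length-prefixed password, salt, secret and
 * associated data. Password and secret are wiped here when the corresponding
 * ARGON2_FLAG_CLEAR_* flag is set.
 */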
void initial_hash(uint8_t *blockhash, argon2_context *context,
                  argon2_type type) {
    blake2b_state BlakeHash;
    uint8_t value[sizeof(uint32_t)];

    if (NULL == context || NULL == blockhash) {
        return;
    }

    blake2b_init(&BlakeHash, ARGON2_PREHASH_DIGEST_LENGTH);

    store32(&value, context->lanes);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->outlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->m_cost);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->t_cost);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->version);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, (uint32_t)type);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->pwdlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->pwd != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->pwd,
                       context->pwdlen);

        if (context->flags & ARGON2_FLAG_CLEAR_PASSWORD) {
            secure_wipe_memory(context->pwd, context->pwdlen);
            context->pwdlen = 0;
        }
    }

    store32(&value, context->saltlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->salt != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->salt,
                       context->saltlen);
    }

    store32(&value, context->secretlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->secret != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->secret,
                       context->secretlen);

        if (context->flags & ARGON2_FLAG_CLEAR_SECRET) {
            secure_wipe_memory(context->secret, context->secretlen);
            context->secretlen = 0;
        }
    }

    store32(&value, context->adlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->ad != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->ad,
                       context->adlen);
    }

    blake2b_final(&BlakeHash, blockhash, ARGON2_PREHASH_DIGEST_LENGTH);
}

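/*
 * initialize(): allocate the memory matrix, derive H0 from the context,
 * fill the first two blocks of every lane from H0, and wipe H0 afterwards.
 */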
int initialize(argon2_instance_t *instance, argon2_context *context) {
    uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH];
    int result = ARGON2_OK;

    if (instance == NULL || context == NULL)
        return ARGON2_INCORRECT_PARAMETER;
    instance->context_ptr = context;

    /* 1. Memory allocation */
    result = allocate_memory(context, (uint8_t **)&(instance->memory),
                             instance->memory_blocks, sizeof(block));
    if (result != ARGON2_OK) {
        return result;
    }

    /* 2. Initial hashing */
    /* H_0 + 8 extra bytes to produce the first blocks */
    /* uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]; */
    /* Hashing all inputs */
    initial_hash(blockhash, context, instance->type);
    /* Zeroing 8 extra bytes */
    clear_internal_memory(blockhash + ARGON2_PREHASH_DIGEST_LENGTH,
                          ARGON2_PREHASH_SEED_LENGTH -
                              ARGON2_PREHASH_DIGEST_LENGTH);

#ifdef GENKAT
    initial_kat(blockhash, context, instance->type);
#endif

    /* 3. Creating first blocks, we always have at least two blocks in a slice
     */
    fill_first_blocks(blockhash, instance);
    /* Clearing the hash */
    clear_internal_memory(blockhash, ARGON2_PREHASH_SEED_LENGTH);

    return ARGON2_OK;
}