/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#include "test.h"

/*
 * Mempool
 * =======
 *
 * Basic tests: done on one core with and without cache:
 *
 *   - Get one object, put one object
 *   - Get two objects, put two objects
 *   - Get all objects, test that their content is not modified and
 *     put them back in the pool.
 */

#define N 65536
#define TIME_S 5
#define MEMPOOL_ELT_SIZE 2048
#define MAX_KEEP 128
#define MEMPOOL_SIZE ((RTE_MAX_LCORE*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
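/*
 * Note (added for clarity): mp points at the mempool currently under test;
 * test_mempool() runs the basic tests with it set first to the cache-less
 * pool, then to the cached one.
 */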
static struct rte_mempool *mp;
static struct rte_mempool *mp_cache, *mp_nocache;

static rte_atomic32_t synchro;

/*
 * save the object number in the first 4 bytes of object data. All
 * other bytes are set to 0.
 */
static void
my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
            void *obj, unsigned i)
{
        uint32_t *objnum = obj;

        memset(obj, 0, mp->elt_size);
        *objnum = i;
}

/* basic tests (done on one core) */
static int
test_mempool_basic(void)
{
        uint32_t *objnum;
        void **objtable;
        void *obj, *obj2;
        char *obj_data;
        int ret = 0;
        unsigned i, j;

        /* dump the mempool status */
        rte_mempool_dump(stdout, mp);

        printf("get an object\n");
        if (rte_mempool_get(mp, &obj) < 0)
                return -1;
        rte_mempool_dump(stdout, mp);

        /* tests that improve coverage */
        printf("get object count\n");
        if (rte_mempool_count(mp) != MEMPOOL_SIZE - 1)
                return -1;

        printf("get private data\n");
        if (rte_mempool_get_priv(mp) !=
                        (char *)mp + MEMPOOL_HEADER_SIZE(mp, mp->pg_num))
                return -1;

        printf("get physical address of an object\n");
        if (MEMPOOL_IS_CONTIG(mp) &&
                        rte_mempool_virt2phy(mp, obj) !=
                        (phys_addr_t) (mp->phys_addr +
                        (phys_addr_t) ((char *)obj - (char *)mp)))
                return -1;

        printf("put the object back\n");
        rte_mempool_put(mp, obj);
        rte_mempool_dump(stdout, mp);

        printf("get 2 objects\n");
        if (rte_mempool_get(mp, &obj) < 0)
                return -1;
        if (rte_mempool_get(mp, &obj2) < 0) {
                rte_mempool_put(mp, obj);
                return -1;
        }
        rte_mempool_dump(stdout, mp);

        printf("put the objects back\n");
        rte_mempool_put(mp, obj);
        rte_mempool_put(mp, obj2);
        rte_mempool_dump(stdout, mp);

        /*
         * get many objects: we cannot get them all because the cache
         * on other cores may not be empty.
         */
        objtable = malloc(MEMPOOL_SIZE * sizeof(void *));
        if (objtable == NULL) {
                return -1;
        }

        for (i = 0; i < MEMPOOL_SIZE; i++) {
                if (rte_mempool_get(mp, &objtable[i]) < 0)
                        break;
        }

        /*
         * for each object, check that its content was not modified,
         * and put objects back in pool
         */
        while (i--) {
                obj = objtable[i];
                obj_data = obj;
                objnum = obj;
                if (*objnum > MEMPOOL_SIZE) {
                        printf("bad object number\n");
                        ret = -1;
                        break;
                }
                for (j = sizeof(*objnum); j < mp->elt_size; j++) {
                        if (obj_data[j] != 0)
                                ret = -1;
                }

                rte_mempool_put(mp, objtable[i]);
        }

        free(objtable);
        if (ret == -1)
                printf("objects were modified!\n");

        return ret;
}

static int test_mempool_creation_with_exceeded_cache_size(void)
{
        struct rte_mempool *mp_cov;

        mp_cov = rte_mempool_create("test_mempool_creation_with_exceeded_cache_size",
                                    MEMPOOL_SIZE,
                                    MEMPOOL_ELT_SIZE,
                                    RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
                                    NULL, NULL,
                                    my_obj_init, NULL,
                                    SOCKET_ID_ANY, 0);
        if (NULL != mp_cov) {
                return -1;
        }

        return 0;
}
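
/*
 * Note (added for clarity): single-producer/single-consumer harness. The
 * consumer lcore gets objects from mp_spsc and parks their pointers in
 * scsp_obj_table; the producer lcore picks them up from the table and puts
 * them back into the pool. The table is protected by scsp_spinlock.
 */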
static struct rte_mempool *mp_spsc;
static rte_spinlock_t scsp_spinlock;
static void *scsp_obj_table[MAX_KEEP];

/*
 * single producer function
 */
static int test_mempool_single_producer(void)
{
        unsigned int i;
        void *obj = NULL;
        uint64_t start_cycles, end_cycles;
        uint64_t duration = rte_get_timer_hz() * 8;

        start_cycles = rte_get_timer_cycles();
        while (1) {
                end_cycles = rte_get_timer_cycles();
                /* duration has elapsed, stop producing */
                if (start_cycles + duration < end_cycles)
                        break;
                rte_spinlock_lock(&scsp_spinlock);
                for (i = 0; i < MAX_KEEP; i++) {
                        if (NULL != scsp_obj_table[i]) {
                                obj = scsp_obj_table[i];
                                break;
                        }
                }
                rte_spinlock_unlock(&scsp_spinlock);
                if (i >= MAX_KEEP) {
                        continue;
                }
                if (rte_mempool_from_obj(obj) != mp_spsc) {
                        printf("test_mempool_single_producer there is an obj not owned by this mempool\n");
                        return -1;
                }
                rte_mempool_sp_put(mp_spsc, obj);
                rte_spinlock_lock(&scsp_spinlock);
                scsp_obj_table[i] = NULL;
                rte_spinlock_unlock(&scsp_spinlock);
        }

        return 0;
}

/*
 * single consumer function
 */
static int test_mempool_single_consumer(void)
{
        unsigned int i;
        void *obj;
        uint64_t start_cycles, end_cycles;
        uint64_t duration = rte_get_timer_hz() * 5;

        start_cycles = rte_get_timer_cycles();
        while (1) {
                end_cycles = rte_get_timer_cycles();
                /* duration has elapsed, stop consuming */
                if (start_cycles + duration < end_cycles)
                        break;
                rte_spinlock_lock(&scsp_spinlock);
                for (i = 0; i < MAX_KEEP; i++) {
                        if (NULL == scsp_obj_table[i])
                                break;
                }
                rte_spinlock_unlock(&scsp_spinlock);
                if (i >= MAX_KEEP)
                        continue;
                if (rte_mempool_sc_get(mp_spsc, &obj) < 0)
                        break;
                rte_spinlock_lock(&scsp_spinlock);
                scsp_obj_table[i] = obj;
                rte_spinlock_unlock(&scsp_spinlock);
        }

        return 0;
}

/*
 * test function for mempool test based on single consumer and single
 * producer; it can run on one lcore only
 */
static int test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
{
        return test_mempool_single_consumer();
}

static void my_mp_init(struct rte_mempool *mp, __attribute__((unused)) void *arg)
{
        printf("mempool name is %s\n", mp->name);
        /* nothing else to be implemented here */
        return;
}

/*
 * test the mempool operations based on single producer and single consumer
 */
static int
test_mempool_sp_sc(void)
{
        int ret = 0;
        unsigned lcore_id = rte_lcore_id();
        unsigned lcore_next;

        /* create a mempool with single producer/consumer ring */
        if (NULL == mp_spsc) {
                mp_spsc = rte_mempool_create("test_mempool_sp_sc", MEMPOOL_SIZE,
                                MEMPOOL_ELT_SIZE, 0, 0,
                                my_mp_init, NULL,
                                my_obj_init, NULL,
                                SOCKET_ID_ANY,
                                MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT |
                                MEMPOOL_F_SC_GET);
                if (NULL == mp_spsc) {
                        return -1;
                }
        }
        if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
                printf("Cannot lookup mempool from its name\n");
                return -1;
        }
        lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
        if (RTE_MAX_LCORE <= lcore_next)
                return -1;
        if (rte_eal_lcore_role(lcore_next) != ROLE_RTE)
                return -1;
        rte_spinlock_init(&scsp_spinlock);
        memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
        rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL,
                              lcore_next);
        if (test_mempool_single_producer() < 0)
                ret = -1;

        if (rte_eal_wait_lcore(lcore_next) < 0)
                ret = -1;

        return ret;
}

/*
 * some more basic tests of the mempool
 */
static int
test_mempool_basic_ex(struct rte_mempool *mp)
{
        unsigned i;
        void **obj;
        void *err_obj;
        int ret = -1;

        if (mp == NULL)
                return ret;

        obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE, sizeof(void *), 0);
        if (obj == NULL) {
                printf("test_mempool_basic_ex failed to allocate the object table with rte_calloc\n");
                return ret;
        }
        printf("test_mempool_basic_ex now mempool (%s) has %u free entries\n",
               mp->name, rte_mempool_free_count(mp));
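        /*
         * Note (added for clarity): no object has been taken from the pool
         * yet at this point, so it is expected to report full.
         */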
        if (rte_mempool_full(mp) != 1) {
                printf("test_mempool_basic_ex the mempool is not full but it should be\n");
                goto fail_mp_basic_ex;
        }

        for (i = 0; i < MEMPOOL_SIZE; i++) {
                if (rte_mempool_mc_get(mp, &obj[i]) < 0) {
                        printf("test_mempool_basic_ex failed to get mempool object for [%u]\n", i);
                        goto fail_mp_basic_ex;
                }
        }
        if (rte_mempool_mc_get(mp, &err_obj) == 0) {
                printf("test_mempool_basic_ex got an object from an empty mempool\n");
                goto fail_mp_basic_ex;
        }
        printf("number: %u\n", i);
        if (rte_mempool_empty(mp) != 1) {
                printf("test_mempool_basic_ex the mempool is not empty but it should be\n");
                goto fail_mp_basic_ex;
        }

        for (i = 0; i < MEMPOOL_SIZE; i++) {
                rte_mempool_mp_put(mp, obj[i]);
        }
        if (rte_mempool_full(mp) != 1) {
                printf("test_mempool_basic_ex the mempool is not full but it should be\n");
                goto fail_mp_basic_ex;
        }

        ret = 0;

fail_mp_basic_ex:
        if (obj != NULL)
                rte_free((void *)obj);

        return ret;
}

static int
test_mempool_same_name_twice_creation(void)
{
        struct rte_mempool *mp_tc;

        mp_tc = rte_mempool_create("test_mempool_same_name_twice_creation",
                                   MEMPOOL_SIZE,
                                   MEMPOOL_ELT_SIZE, 0, 0,
                                   NULL, NULL,
                                   NULL, NULL,
                                   SOCKET_ID_ANY, 0);
        if (NULL == mp_tc)
                return -1;

        /* creating a mempool with an already used name must fail */
        mp_tc = rte_mempool_create("test_mempool_same_name_twice_creation",
                                   MEMPOOL_SIZE,
                                   MEMPOOL_ELT_SIZE, 0, 0,
                                   NULL, NULL,
                                   NULL, NULL,
                                   SOCKET_ID_ANY, 0);
        if (NULL != mp_tc)
                return -1;

        return 0;
}

/*
 * Basic tests for the mempool_xmem functions.
 */
static int
test_mempool_xmem_misc(void)
{
        uint32_t elt_num, total_size;
        size_t sz;
        ssize_t usz;

        elt_num = MAX_KEEP;
        total_size = rte_mempool_calc_obj_size(MEMPOOL_ELT_SIZE, 0, NULL);
        sz = rte_mempool_xmem_size(elt_num, total_size, MEMPOOL_PG_SHIFT_MAX);

        usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1,
                MEMPOOL_PG_SHIFT_MAX);

        if (sz != (size_t)usz) {
                printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) "
                        "returns: %#zx, while expected: %#zx;\n",
                        __func__, elt_num, total_size, (size_t)usz, sz);
                return -1;
        }

        return 0;
}

static int
test_mempool(void)
{
        rte_atomic32_init(&synchro);

        /* create a mempool (without cache) */
        if (mp_nocache == NULL)
                mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
                                                MEMPOOL_ELT_SIZE, 0, 0,
                                                NULL, NULL,
                                                my_obj_init, NULL,
                                                SOCKET_ID_ANY, 0);
        if (mp_nocache == NULL)
                return -1;

        /* create a mempool (with cache) */
        if (mp_cache == NULL)
                mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
                                              MEMPOOL_ELT_SIZE,
                                              RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
                                              NULL, NULL,
                                              my_obj_init, NULL,
                                              SOCKET_ID_ANY, 0);
        if (mp_cache == NULL)
                return -1;

        /* retrieve the mempool from its name */
        if (rte_mempool_lookup("test_nocache") != mp_nocache) {
                printf("Cannot lookup mempool from its name\n");
                return -1;
        }

        rte_mempool_list_dump(stdout);

        /* basic tests without cache */
        mp = mp_nocache;
        if (test_mempool_basic() < 0)
                return -1;

        /* basic tests with cache */
        mp = mp_cache;
        if (test_mempool_basic() < 0)
                return -1;

        /* more basic tests without cache */
        if (test_mempool_basic_ex(mp_nocache) < 0)
                return -1;

        /* mempool operation test based on single producer and
         * single consumer */
        if (test_mempool_sp_sc() < 0)
                return -1;

        if (test_mempool_creation_with_exceeded_cache_size() < 0)
                return -1;

        if (test_mempool_same_name_twice_creation() < 0)
                return -1;

        if (test_mempool_xmem_misc() < 0)
                return -1;

        rte_mempool_list_dump(stdout);

        return 0;
}

static struct test_command mempool_cmd = {
        .command = "mempool_autotest",
        .callback = test_mempool,
};
REGISTER_TEST_COMMAND(mempool_cmd);