/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/epoll.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/select.h>
#ifdef USE_JANSSON
#include <jansson.h>
#else
#pragma message "Jansson dev libs unavailable, not including JSON parsing"
#endif
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_pmd_i40e.h>

#include <libvirt/libvirt.h>
#include "channel_monitor.h"
#include "channel_commands.h"
#include "channel_manager.h"
#include "power_manager.h"
#include "oob_monitor.h"

#define RTE_LOGTYPE_CHANNEL_MONITOR RTE_LOGTYPE_USER1

#define MAX_EVENTS 256

uint64_t vsi_pkt_count_prev[384];
uint64_t rdtsc_prev[384];
#define MAX_JSON_STRING_LEN 1024
char json_data[MAX_JSON_STRING_LEN];

double time_period_ms = 1;
static volatile unsigned run_loop = 1;
static int global_event_fd;
static unsigned int policy_is_set;
static struct epoll_event *global_events_list;
static struct policy policies[MAX_CLIENTS];

#ifdef USE_JANSSON

union PFID {
	struct ether_addr addr;
	uint64_t pfid;
};

static int
str_to_ether_addr(const char *a, struct ether_addr *ether_addr)
{
	int i;
	char *end;
	unsigned long o[ETHER_ADDR_LEN];

	i = 0;
	do {
		errno = 0;
		o[i] = strtoul(a, &end, 16);
		if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
			return -1;
		a = end + 1;
	} while (++i != RTE_DIM(o) && end[0] != 0);

	/* Junk at the end of line */
	if (end[0] != 0)
		return -1;

	/* Support the format XX:XX:XX:XX:XX:XX */
	if (i == ETHER_ADDR_LEN) {
		while (i-- != 0) {
			if (o[i] > UINT8_MAX)
				return -1;
			ether_addr->addr_bytes[i] = (uint8_t)o[i];
		}
	/* Support the format XXXX:XXXX:XXXX */
	} else if (i == ETHER_ADDR_LEN / 2) {
		while (i-- != 0) {
			if (o[i] > UINT16_MAX)
				return -1;
			ether_addr->addr_bytes[i * 2] =
					(uint8_t)(o[i] >> 8);
			ether_addr->addr_bytes[i * 2 + 1] =
					(uint8_t)(o[i] & 0xff);
		}
	/* unknown format */
	} else
		return -1;

	return 0;
}

static int
set_policy_mac(struct channel_packet *pkt, int idx, char *mac)
{
	union PFID pfid;
	int ret;

	/* Use port MAC address as the vfid */
	ret = str_to_ether_addr(mac, &pfid.addr);

	if (ret != 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR,
			"Invalid mac address received in JSON\n");
		pkt->vfid[idx] = 0;
		return -1;
	}

	printf("Received MAC Address: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
			"%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
			pfid.addr.addr_bytes[0], pfid.addr.addr_bytes[1],
			pfid.addr.addr_bytes[2], pfid.addr.addr_bytes[3],
			pfid.addr.addr_bytes[4], pfid.addr.addr_bytes[5]);

	pkt->vfid[idx] = pfid.pfid;
	return 0;
}

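/*
 * parse_json_to_pkt() below maps a decoded Jansson object onto a
 * channel_packet. As a rough illustration only (the VM name and values here
 * are made up, not taken from this file), a "create" policy request of the
 * shape it accepts could look like:
 *
 *	{"policy": {
 *		"name": "ubuntu_vm",
 *		"command": "create",
 *		"policy_type": "TIME",
 *		"busy_hours": [17, 18, 19, 20, 21, 22, 23],
 *		"quiet_hours": [2, 3, 4, 5, 6],
 *		"core_list": [11, 12, 13]
 *	}}
 *
 * Unknown keys are logged and skipped rather than treated as errors.
 */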
static int
parse_json_to_pkt(json_t *element, struct channel_packet *pkt)
{
	const char *key;
	json_t *value;
	int ret;

	memset(pkt, 0, sizeof(struct channel_packet));

	pkt->nb_mac_to_monitor = 0;
	pkt->t_boost_status.tbEnabled = false;
	pkt->workload = LOW;
	pkt->policy_to_use = TIME;
	pkt->command = PKT_POLICY;
	pkt->core_type = CORE_TYPE_PHYSICAL;

	json_object_foreach(element, key, value) {
		if (!strcmp(key, "policy")) {
			/* Recurse in to get the contents of profile */
			ret = parse_json_to_pkt(value, pkt);
			if (ret)
				return ret;
		} else if (!strcmp(key, "instruction")) {
			/* Recurse in to get the contents of instruction */
			ret = parse_json_to_pkt(value, pkt);
			if (ret)
				return ret;
		} else if (!strcmp(key, "name")) {
			snprintf(pkt->vm_name, sizeof(pkt->vm_name), "%s",
					json_string_value(value));
		} else if (!strcmp(key, "command")) {
			char command[32];
			snprintf(command, 32, "%s", json_string_value(value));
			if (!strcmp(command, "power")) {
				pkt->command = CPU_POWER;
			} else if (!strcmp(command, "create")) {
				pkt->command = PKT_POLICY;
			} else if (!strcmp(command, "destroy")) {
				pkt->command = PKT_POLICY_REMOVE;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Invalid command received in JSON\n");
				return -1;
			}
		} else if (!strcmp(key, "policy_type")) {
			char command[32];
			snprintf(command, 32, "%s", json_string_value(value));
			if (!strcmp(command, "TIME")) {
				pkt->policy_to_use = TIME;
			} else if (!strcmp(command, "TRAFFIC")) {
				pkt->policy_to_use = TRAFFIC;
			} else if (!strcmp(command, "WORKLOAD")) {
				pkt->policy_to_use = WORKLOAD;
			} else if (!strcmp(command, "BRANCH_RATIO")) {
				pkt->policy_to_use = BRANCH_RATIO;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Wrong policy_type received in JSON\n");
				return -1;
			}
		} else if (!strcmp(key, "workload")) {
			char command[32];
			snprintf(command, 32, "%s", json_string_value(value));
			if (!strcmp(command, "HIGH")) {
				pkt->workload = HIGH;
			} else if (!strcmp(command, "MEDIUM")) {
				pkt->workload = MEDIUM;
			} else if (!strcmp(command, "LOW")) {
				pkt->workload = LOW;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Wrong workload received in JSON\n");
				return -1;
			}
		} else if (!strcmp(key, "busy_hours")) {
			unsigned int i;
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				int hour = (int)json_integer_value(
						json_array_get(value, i));
				pkt->timer_policy.busy_hours[i] = hour;
			}
		} else if (!strcmp(key, "quiet_hours")) {
			unsigned int i;
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				int hour = (int)json_integer_value(
						json_array_get(value, i));
				pkt->timer_policy.quiet_hours[i] = hour;
			}
		} else if (!strcmp(key, "core_list")) {
			unsigned int i;
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				int core = (int)json_integer_value(
						json_array_get(value, i));
				pkt->vcpu_to_control[i] = core;
			}
			pkt->num_vcpu = size;
		} else if (!strcmp(key, "mac_list")) {
			unsigned int i;
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				char mac[32];
				snprintf(mac, 32, "%s", json_string_value(
						json_array_get(value, i)));
				set_policy_mac(pkt, i, mac);
			}
			pkt->nb_mac_to_monitor = size;
		} else if (!strcmp(key, "avg_packet_thresh")) {
			pkt->traffic_policy.avg_max_packet_thresh =
					(uint32_t)json_integer_value(value);
		} else if (!strcmp(key, "max_packet_thresh")) {
			pkt->traffic_policy.max_max_packet_thresh =
					(uint32_t)json_integer_value(value);
		} else if (!strcmp(key, "unit")) {
			char unit[32];
			snprintf(unit, 32, "%s", json_string_value(value));
			if (!strcmp(unit, "SCALE_UP")) {
				pkt->unit = CPU_POWER_SCALE_UP;
			} else if (!strcmp(unit, "SCALE_DOWN")) {
				pkt->unit = CPU_POWER_SCALE_DOWN;
			} else if (!strcmp(unit, "SCALE_MAX")) {
				pkt->unit = CPU_POWER_SCALE_MAX;
			} else if (!strcmp(unit, "SCALE_MIN")) {
				pkt->unit = CPU_POWER_SCALE_MIN;
			} else if (!strcmp(unit, "ENABLE_TURBO")) {
				pkt->unit = CPU_POWER_ENABLE_TURBO;
			} else if (!strcmp(unit, "DISABLE_TURBO")) {
				pkt->unit = CPU_POWER_DISABLE_TURBO;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Invalid unit received in JSON\n");
				return -1;
			}
		} else if (!strcmp(key, "resource_id")) {
			pkt->resource_id = (uint32_t)json_integer_value(value);
		} else {
			RTE_LOG(ERR, CHANNEL_MONITOR,
				"Unknown key received in JSON string: %s\n",
				key);
		}
	}
	return 0;
}
#endif

void channel_monitor_exit(void)
{
	run_loop = 0;
	rte_free(global_events_list);
}

static void
core_share(int pNo, int z, int x, int t)
{
	if (policies[pNo].core_share[z].pcpu == lvm_info[x].pcpus[t]) {
		if (strcmp(policies[pNo].pkt.vm_name,
				lvm_info[x].vm_name) != 0) {
			policies[pNo].core_share[z].status = 1;
			power_manager_scale_core_max(
					policies[pNo].core_share[z].pcpu);
		}
	}
}

static void
core_share_status(int pNo)
{
	int noVms = 0, noVcpus = 0, z, x, t;

	get_all_vm(&noVms, &noVcpus);

	/* Reset Core Share Status. */
	for (z = 0; z < noVcpus; z++)
		policies[pNo].core_share[z].status = 0;

	/* Foreach vcpu in a policy. */
	for (z = 0; z < policies[pNo].pkt.num_vcpu; z++) {
		/* Foreach VM on the platform. */
		for (x = 0; x < noVms; x++) {
			/* Foreach vcpu of VMs on platform. */
			for (t = 0; t < lvm_info[x].num_cpus; t++)
				core_share(pNo, z, x, t);
		}
	}
}

static int
pcpu_monitor(struct policy *pol, struct core_info *ci, int pcpu, int count)
{
	int ret = 0;

	if (pol->pkt.policy_to_use == BRANCH_RATIO) {
		ci->cd[pcpu].oob_enabled = 1;
		ret = add_core_to_monitor(pcpu);
		if (ret == 0)
			RTE_LOG(INFO, CHANNEL_MONITOR,
					"Monitoring pcpu %d OOB for %s\n",
					pcpu, pol->pkt.vm_name);
		else
			RTE_LOG(ERR, CHANNEL_MONITOR,
					"Error monitoring pcpu %d OOB for %s\n",
					pcpu, pol->pkt.vm_name);
	} else {
		pol->core_share[count].pcpu = pcpu;
		RTE_LOG(INFO, CHANNEL_MONITOR,
				"Monitoring pcpu %d for %s\n",
				pcpu, pol->pkt.vm_name);
	}
	return ret;
}

static void
get_pcpu_to_control(struct policy *pol)
{
	/* Convert vcpu to pcpu. */
	struct vm_info info;
	int pcpu, count;
	struct core_info *ci;

	ci = get_core_info();

	RTE_LOG(DEBUG, CHANNEL_MONITOR,
			"Looking for pcpu for %s\n", pol->pkt.vm_name);

	/*
	 * Now that we're handling virtual and physical cores, we need to
	 * differentiate between them when adding them to the branch monitor.
	 * Virtual cores need to be converted to physical cores.
	 */
	if (pol->pkt.core_type == CORE_TYPE_VIRTUAL) {
		/*
		 * If the cores in the policy are virtual, we need to map them
		 * to physical cores. We look up the vm info and use that for
		 * the mapping.
		 */
		get_info_vm(pol->pkt.vm_name, &info);
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			pcpu = info.pcpu_map[pol->pkt.vcpu_to_control[count]];
			pcpu_monitor(pol, ci, pcpu, count);
		}
	} else {
		/*
		 * If the cores in the policy are physical, we just use
		 * those core id's directly.
		 */
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			pcpu = pol->pkt.vcpu_to_control[count];
			pcpu_monitor(pol, ci, pcpu, count);
		}
	}
}

static int
get_pfid(struct policy *pol)
{
	int i, x, ret = 0;

	for (i = 0; i < pol->pkt.nb_mac_to_monitor; i++) {

		RTE_ETH_FOREACH_DEV(x) {
			ret = rte_pmd_i40e_query_vfid_by_mac(x,
				(struct ether_addr *)&(pol->pkt.vfid[i]));
			if (ret != -EINVAL) {
				pol->port[i] = x;
				break;
			}
		}
		if (ret == -EINVAL || ret == -ENOTSUP || ret == -ENODEV) {
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Error with Policy. MAC not found on "
				"attached ports\n");
			pol->enabled = 0;
			return ret;
		}
		pol->pfid[i] = ret;
	}
	return 1;
}

static int
update_policy(struct channel_packet *pkt)
{
	unsigned int updated = 0;
	int i;

	RTE_LOG(INFO, CHANNEL_MONITOR,
			"Applying policy for %s\n", pkt->vm_name);

	for (i = 0; i < MAX_CLIENTS; i++) {
		if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
			/* Copy the contents of *pkt into the policy.pkt */
			policies[i].pkt = *pkt;
			get_pcpu_to_control(&policies[i]);
			if (get_pfid(&policies[i]) < 0) {
				updated = 1;
				break;
			}
			core_share_status(i);
			policies[i].enabled = 1;
			updated = 1;
		}
	}
	if (!updated) {
		for (i = 0; i < MAX_CLIENTS; i++) {
			if (policies[i].enabled == 0) {
				policies[i].pkt = *pkt;
				get_pcpu_to_control(&policies[i]);
				if (get_pfid(&policies[i]) < 0)
					break;
				core_share_status(i);
				policies[i].enabled = 1;
				break;
			}
		}
	}
	return 0;
}

static int
remove_policy(struct channel_packet *pkt)
{
	int i;

	/*
	 * Disabling the policy is simply a case of setting
	 * enabled to 0
	 */
	for (i = 0; i < MAX_CLIENTS; i++) {
		if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
			policies[i].enabled = 0;
			return 0;
		}
	}
	return -1;
}

static uint64_t
get_pkt_diff(struct policy *pol)
{
	uint64_t vsi_pkt_count,
		vsi_pkt_total = 0,
		vsi_pkt_count_prev_total = 0;
	double rdtsc_curr, rdtsc_diff, diff;
	int x;
	struct rte_eth_stats vf_stats;

	for (x = 0; x < pol->pkt.nb_mac_to_monitor; x++) {

		/* Read vsi stats */
		if (rte_pmd_i40e_get_vf_stats(x, pol->pfid[x], &vf_stats) == 0)
			vsi_pkt_count = vf_stats.ipackets;
		else
			vsi_pkt_count = -1;

		vsi_pkt_total += vsi_pkt_count;

		vsi_pkt_count_prev_total += vsi_pkt_count_prev[pol->pfid[x]];
		vsi_pkt_count_prev[pol->pfid[x]] = vsi_pkt_count;
	}

	rdtsc_curr = rte_rdtsc_precise();
	rdtsc_diff = rdtsc_curr - rdtsc_prev[pol->pfid[x-1]];
	rdtsc_prev[pol->pfid[x-1]] = rdtsc_curr;

	diff = (vsi_pkt_total - vsi_pkt_count_prev_total) *
			((double)rte_get_tsc_hz() / rdtsc_diff);

	return diff;
}

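/*
 * The traffic profile below acts on the rate estimate produced by
 * get_pkt_diff(): the packet-count delta is scaled by tsc_hz / tsc_delta,
 * giving an approximate packets-per-second figure. For example, a delta of
 * 50000 packets measured over half a second of TSC time reads as roughly
 * 100000 pkts/s. That rate is compared against the policy's
 * max_max_packet_thresh and avg_max_packet_thresh to choose the max, medium
 * or min frequency for each core not already shared with another VM.
 */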
static void
apply_traffic_profile(struct policy *pol)
{
	int count;
	uint64_t diff = 0;

	diff = get_pkt_diff(pol);

	if (diff >= (pol->pkt.traffic_policy.max_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_max(
						pol->core_share[count].pcpu);
		}
	} else if (diff >= (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_med(
						pol->core_share[count].pcpu);
		}
	} else if (diff < (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_min(
						pol->core_share[count].pcpu);
		}
	}
}

static void
apply_time_profile(struct policy *pol)
{
	int count, x;
	struct timeval tv;
	struct tm *ptm;
	char time_string[40];

	/* Obtain the time of day, and convert it to a tm struct. */
	gettimeofday(&tv, NULL);
	ptm = localtime(&tv.tv_sec);
	/* Format the date and time, down to a single second. */
	strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm);

	for (x = 0; x < HOURS; x++) {

		if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) {
			for (count = 0; count < pol->pkt.num_vcpu; count++) {
				if (pol->core_share[count].status != 1) {
					power_manager_scale_core_max(
						pol->core_share[count].pcpu);
				}
			}
			break;
		} else if (ptm->tm_hour ==
				pol->pkt.timer_policy.quiet_hours[x]) {
			for (count = 0; count < pol->pkt.num_vcpu; count++) {
				if (pol->core_share[count].status != 1) {
					power_manager_scale_core_min(
						pol->core_share[count].pcpu);
				}
			}
			break;
		} else if (ptm->tm_hour ==
			pol->pkt.timer_policy.hours_to_use_traffic_profile[x]) {
			apply_traffic_profile(pol);
			break;
		}
	}
}

static void
apply_workload_profile(struct policy *pol)
{
	int count;

	if (pol->pkt.workload == HIGH) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_max(
						pol->core_share[count].pcpu);
		}
	} else if (pol->pkt.workload == MEDIUM) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_med(
						pol->core_share[count].pcpu);
		}
	} else if (pol->pkt.workload == LOW) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_min(
						pol->core_share[count].pcpu);
		}
	}
}

static void
apply_policy(struct policy *pol)
{
	struct channel_packet *pkt = &pol->pkt;

	/* Check which policy to use */
	if (pkt->policy_to_use == TRAFFIC)
		apply_traffic_profile(pol);
	else if (pkt->policy_to_use == TIME)
		apply_time_profile(pol);
	else if (pkt->policy_to_use == WORKLOAD)
		apply_workload_profile(pol);
}

static int
process_request(struct channel_packet *pkt, struct channel_info *chan_info)
{
	int ret;

	if (chan_info == NULL)
		return -1;

	if (rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_CONNECTED,
			CHANNEL_MGR_CHANNEL_PROCESSING) == 0)
		return -1;

	if (pkt->command == CPU_POWER) {
		unsigned int core_num;

		if (pkt->core_type == CORE_TYPE_VIRTUAL)
			core_num = get_pcpu(chan_info, pkt->resource_id);
		else
			core_num = pkt->resource_id;

		RTE_LOG(DEBUG, CHANNEL_MONITOR, "Processing requested cmd for cpu:%d\n",
			core_num);

		switch (pkt->unit) {
		case(CPU_POWER_SCALE_MIN):
			power_manager_scale_core_min(core_num);
			break;
		case(CPU_POWER_SCALE_MAX):
			power_manager_scale_core_max(core_num);
			break;
		case(CPU_POWER_SCALE_DOWN):
			power_manager_scale_core_down(core_num);
			break;
		case(CPU_POWER_SCALE_UP):
			power_manager_scale_core_up(core_num);
			break;
		case(CPU_POWER_ENABLE_TURBO):
			power_manager_enable_turbo_core(core_num);
			break;
		case(CPU_POWER_DISABLE_TURBO):
			power_manager_disable_turbo_core(core_num);
			break;
		default:
			break;
		}
	}

	if (pkt->command == PKT_POLICY) {
		RTE_LOG(INFO, CHANNEL_MONITOR, "Processing policy request %s\n",
				pkt->vm_name);
		update_policy(pkt);
		policy_is_set = 1;
	}

	if (pkt->command == PKT_POLICY_REMOVE) {
		ret = remove_policy(pkt);
		if (ret == 0)
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Removed policy %s\n", pkt->vm_name);
		else
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Policy %s does not exist\n", pkt->vm_name);
	}

	/*
	 * Return is not checked as channel status may have been set to DISABLED
	 * from management thread
	 */
	rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_PROCESSING,
			CHANNEL_MGR_CHANNEL_CONNECTED);
	return 0;
}

int
add_channel_to_monitor(struct channel_info **chan_info)
{
	struct channel_info *info = *chan_info;
	struct epoll_event event;

	event.events = EPOLLIN;
	event.data.ptr = info;
	if (epoll_ctl(global_event_fd, EPOLL_CTL_ADD, info->fd, &event) < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to add channel '%s' "
				"to epoll\n", info->channel_path);
		return -1;
	}
	RTE_LOG(INFO, CHANNEL_MONITOR, "Added channel '%s' "
			"to monitor\n", info->channel_path);
	return 0;
}

int
remove_channel_from_monitor(struct channel_info *chan_info)
{
	if (epoll_ctl(global_event_fd, EPOLL_CTL_DEL,
			chan_info->fd, NULL) < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to remove channel '%s' "
				"from epoll\n", chan_info->channel_path);
		return -1;
	}
	return 0;
}

int
channel_monitor_init(void)
{
	global_event_fd = epoll_create1(0);
	if (global_event_fd < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR,
				"Error creating epoll context with error %s\n",
				strerror(errno));
		return -1;
	}
	global_events_list = rte_malloc("epoll_events",
			sizeof(*global_events_list)
			* MAX_EVENTS, RTE_CACHE_LINE_SIZE);
	if (global_events_list == NULL) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to rte_malloc for "
				"epoll events\n");
		return -1;
	}
	return 0;
}

static void
read_binary_packet(struct channel_info *chan_info)
{
	struct channel_packet pkt;
	void *buffer = &pkt;
	int buffer_len = sizeof(pkt);
	int n_bytes, err = 0;

	while (buffer_len > 0) {
		n_bytes = read(chan_info->fd,
				buffer, buffer_len);
		if (n_bytes == buffer_len)
			break;
		if (n_bytes < 0) {
			err = errno;
			RTE_LOG(DEBUG, CHANNEL_MONITOR,
				"Received error on "
				"channel '%s' read: %s\n",
				chan_info->channel_path,
				strerror(err));
			remove_channel(&chan_info);
			break;
		}
		buffer = (char *)buffer + n_bytes;
		buffer_len -= n_bytes;
	}
	if (!err)
		process_request(&pkt, chan_info);
}

#ifdef USE_JANSSON
/*
 * JSON requests arrive over a stream with no length prefix, so each message
 * is framed by brace counting: bytes are read one at a time and the buffered
 * string is terminated once the '{' / '}' depth returns to zero. Note that a
 * brace inside a quoted string value would be miscounted by this simple
 * scheme.
 */
static void
read_json_packet(struct channel_info *chan_info)
{
	struct channel_packet pkt;
	int n_bytes, ret;
	json_t *root;
	json_error_t error;

	/* read opening brace to closing brace */
	do {
		int idx = 0;
		int indent = 0;
		do {
			n_bytes = read(chan_info->fd, &json_data[idx], 1);
			if (n_bytes == 0)
				break;
			if (json_data[idx] == '{')
				indent++;
			if (json_data[idx] == '}')
				indent--;
			if ((indent > 0) || (idx > 0))
				idx++;
			if (indent == 0)
				json_data[idx] = 0;
			if (idx >= MAX_JSON_STRING_LEN-1)
				break;
		} while (indent > 0);

		if (indent > 0)
			/*
			 * We've broken out of the read loop without getting
			 * a closing brace, so throw away the data
			 */
			json_data[idx] = 0;

		if (strlen(json_data) == 0)
			continue;

		printf("got [%s]\n", json_data);

		root = json_loads(json_data, 0, &error);

		if (root) {
			/*
			 * Because our data is now in the json
			 * object, we can overwrite the pkt
			 * with a channel_packet struct, using
			 * parse_json_to_pkt()
			 */
			ret = parse_json_to_pkt(root, &pkt);
			json_decref(root);
			if (ret) {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Error validating JSON profile data\n");
				break;
			}
			process_request(&pkt, chan_info);
		} else {
			RTE_LOG(ERR, CHANNEL_MONITOR,
				"JSON error on line %d: %s\n",
				error.line, error.text);
		}
	} while (n_bytes > 0);
}
#endif

void
run_channel_monitor(void)
{
	while (run_loop) {
		int n_events, i;

		n_events = epoll_wait(global_event_fd, global_events_list,
				MAX_EVENTS, 1);
		if (!run_loop)
			break;
		for (i = 0; i < n_events; i++) {
			struct channel_info *chan_info = (struct channel_info *)
					global_events_list[i].data.ptr;
			if ((global_events_list[i].events & EPOLLERR) ||
				(global_events_list[i].events & EPOLLHUP)) {
				RTE_LOG(INFO, CHANNEL_MONITOR,
						"Remote closed connection for "
						"channel '%s'\n",
						chan_info->channel_path);
				remove_channel(&chan_info);
				continue;
			}
			if (global_events_list[i].events & EPOLLIN) {

				switch (chan_info->type) {
				case CHANNEL_TYPE_BINARY:
					read_binary_packet(chan_info);
					break;
#ifdef USE_JANSSON
				case CHANNEL_TYPE_JSON:
					read_json_packet(chan_info);
					break;
#endif
				default:
					break;
				}
			}
		}
		rte_delay_us(time_period_ms*1000);
		if (policy_is_set) {
			int j;

			for (j = 0; j < MAX_CLIENTS; j++) {
				if (policies[j].enabled == 1)
					apply_policy(&policies[j]);
			}
		}
	}
}