/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <ctype.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/epoll.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/select.h>
#ifdef USE_JANSSON
#include <jansson.h>
#else
#pragma message "Jansson dev libs unavailable, not including JSON parsing"
#endif
#include <rte_string_fns.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#include <rte_power.h>

#include <libvirt/libvirt.h>
#include "channel_monitor.h"
#include "channel_manager.h"
#include "power_manager.h"
#include "oob_monitor.h"

#define RTE_LOGTYPE_CHANNEL_MONITOR RTE_LOGTYPE_USER1

#define MAX_EVENTS 256

uint64_t vsi_pkt_count_prev[384];
uint64_t rdtsc_prev[384];
#define MAX_JSON_STRING_LEN 1024
char json_data[MAX_JSON_STRING_LEN];

double time_period_ms = 1;
static volatile unsigned run_loop = 1;
static int global_event_fd;
static unsigned int policy_is_set;
static struct epoll_event *global_events_list;
static struct policy policies[RTE_MAX_LCORE];

#ifdef USE_JANSSON

union PFID {
	struct rte_ether_addr addr;
	uint64_t pfid;
};

static int
str_to_ether_addr(const char *a, struct rte_ether_addr *ether_addr)
{
	int i;
	char *end;
	unsigned long o[RTE_ETHER_ADDR_LEN];

	i = 0;
	do {
		errno = 0;
		o[i] = strtoul(a, &end, 16);
		if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
			return -1;
		a = end + 1;
	} while (++i != RTE_DIM(o) && end[0] != 0);

	/* Junk at the end of line */
	if (end[0] != 0)
		return -1;

	/* Support the format XX:XX:XX:XX:XX:XX */
	if (i == RTE_ETHER_ADDR_LEN) {
		while (i-- != 0) {
			if (o[i] > UINT8_MAX)
				return -1;
			ether_addr->addr_bytes[i] = (uint8_t)o[i];
		}
	/* Support the format XXXX:XXXX:XXXX */
	} else if (i == RTE_ETHER_ADDR_LEN / 2) {
		while (i-- != 0) {
			if (o[i] > UINT16_MAX)
				return -1;
			ether_addr->addr_bytes[i * 2] =
					(uint8_t)(o[i] >> 8);
			ether_addr->addr_bytes[i * 2 + 1] =
					(uint8_t)(o[i] & 0xff);
		}
	/* unknown format */
	} else
		return -1;

	return 0;
}

static int
set_policy_mac(struct rte_power_channel_packet *pkt, int idx, char *mac)
{
	union PFID pfid;
	int ret;

	/* Use port MAC address as the vfid */
	ret = str_to_ether_addr(mac, &pfid.addr);

	if (ret != 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR,
			"Invalid mac address received in JSON\n");
		pkt->vfid[idx] = 0;
		return -1;
	}

	printf("Received MAC Address: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
			"%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
			pfid.addr.addr_bytes[0], pfid.addr.addr_bytes[1],
			pfid.addr.addr_bytes[2], pfid.addr.addr_bytes[3],
			pfid.addr.addr_bytes[4], pfid.addr.addr_bytes[5]);

	pkt->vfid[idx] = pfid.pfid;
	return 0;
}

static char*
get_resource_name_from_chn_path(const char *channel_path)
{
	char *substr = NULL;

	substr = strstr(channel_path, CHANNEL_MGR_FIFO_PATTERN_NAME);

	return substr;
}

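/*
 * Illustrative example of the JSON accepted on a fifo channel and handled by
 * parse_json_to_pkt() below.  The key names are the ones parsed there; the
 * values are made-up sample data, not application defaults:
 *
 *   {"policy": {
 *       "command": "create",
 *       "policy_type": "TRAFFIC",
 *       "avg_packet_thresh": 100000,
 *       "max_packet_thresh": 500000,
 *       "mac_list": [ "de:ad:be:ef:01:01" ]
 *   }}
 *
 * A single core instruction looks similar, e.g.
 *   {"instruction": {"command": "power", "unit": "SCALE_MAX"}}
 */
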
static int
get_resource_id_from_vmname(const char *vm_name)
{
	int result = -1;
	int off = 0;

	if (vm_name == NULL)
		return -1;

	while (vm_name[off] != '\0') {
		if (isdigit(vm_name[off]))
			break;
		off++;
	}
	result = atoi(&vm_name[off]);
	if ((result == 0) && (vm_name[off] != '0'))
		return -1;

	return result;
}

static int
parse_json_to_pkt(json_t *element, struct rte_power_channel_packet *pkt,
		const char *vm_name)
{
	const char *key;
	json_t *value;
	int ret;
	int resource_id;

	memset(pkt, 0, sizeof(*pkt));

	pkt->nb_mac_to_monitor = 0;
	pkt->t_boost_status.tbEnabled = false;
	pkt->workload = RTE_POWER_WL_LOW;
	pkt->policy_to_use = RTE_POWER_POLICY_TIME;
	pkt->command = RTE_POWER_PKT_POLICY;
	pkt->core_type = RTE_POWER_CORE_TYPE_PHYSICAL;

	if (vm_name == NULL) {
		RTE_LOG(ERR, CHANNEL_MONITOR,
			"vm_name is NULL, request rejected!\n");
		return -1;
	}

	json_object_foreach(element, key, value) {
		if (!strcmp(key, "policy")) {
			/* Recurse in to get the contents of profile */
			ret = parse_json_to_pkt(value, pkt, vm_name);
			if (ret)
				return ret;
		} else if (!strcmp(key, "instruction")) {
			/* Recurse in to get the contents of instruction */
			ret = parse_json_to_pkt(value, pkt, vm_name);
			if (ret)
				return ret;
		} else if (!strcmp(key, "command")) {
			char command[32];
			strlcpy(command, json_string_value(value), 32);
			if (!strcmp(command, "power")) {
				pkt->command = RTE_POWER_CPU_POWER;
			} else if (!strcmp(command, "create")) {
				pkt->command = RTE_POWER_PKT_POLICY;
			} else if (!strcmp(command, "destroy")) {
				pkt->command = RTE_POWER_PKT_POLICY_REMOVE;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Invalid command received in JSON\n");
				return -1;
			}
		} else if (!strcmp(key, "policy_type")) {
			char command[32];
			strlcpy(command, json_string_value(value), 32);
			if (!strcmp(command, "TIME")) {
				pkt->policy_to_use =
						RTE_POWER_POLICY_TIME;
			} else if (!strcmp(command, "TRAFFIC")) {
				pkt->policy_to_use =
						RTE_POWER_POLICY_TRAFFIC;
			} else if (!strcmp(command, "WORKLOAD")) {
				pkt->policy_to_use =
						RTE_POWER_POLICY_WORKLOAD;
			} else if (!strcmp(command, "BRANCH_RATIO")) {
				pkt->policy_to_use =
						RTE_POWER_POLICY_BRANCH_RATIO;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Wrong policy_type received in JSON\n");
				return -1;
			}
		} else if (!strcmp(key, "workload")) {
			char command[32];
			strlcpy(command, json_string_value(value), 32);
			if (!strcmp(command, "HIGH")) {
				pkt->workload = RTE_POWER_WL_HIGH;
			} else if (!strcmp(command, "MEDIUM")) {
				pkt->workload = RTE_POWER_WL_MEDIUM;
			} else if (!strcmp(command, "LOW")) {
				pkt->workload = RTE_POWER_WL_LOW;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Wrong workload received in JSON\n");
				return -1;
			}
		} else if (!strcmp(key, "busy_hours")) {
			unsigned int i;
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				int hour = (int)json_integer_value(
						json_array_get(value, i));
				pkt->timer_policy.busy_hours[i] = hour;
			}
		} else if (!strcmp(key, "quiet_hours")) {
			unsigned int i;
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				int hour = (int)json_integer_value(
						json_array_get(value, i));
				pkt->timer_policy.quiet_hours[i] = hour;
			}
		} else if (!strcmp(key, "mac_list")) {
			unsigned int i;
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				char mac[32];
				strlcpy(mac,
					json_string_value(json_array_get(value, i)),
					32);
				set_policy_mac(pkt, i, mac);
			}
			pkt->nb_mac_to_monitor = size;
		} else if (!strcmp(key, "avg_packet_thresh")) {
			pkt->traffic_policy.avg_max_packet_thresh =
					(uint32_t)json_integer_value(value);
		} else if (!strcmp(key, "max_packet_thresh")) {
			pkt->traffic_policy.max_max_packet_thresh =
					(uint32_t)json_integer_value(value);
		} else if (!strcmp(key, "unit")) {
			char unit[32];
			strlcpy(unit, json_string_value(value), 32);
			if (!strcmp(unit, "SCALE_UP")) {
				pkt->unit = RTE_POWER_SCALE_UP;
			} else if (!strcmp(unit, "SCALE_DOWN")) {
				pkt->unit = RTE_POWER_SCALE_DOWN;
			} else if (!strcmp(unit, "SCALE_MAX")) {
				pkt->unit = RTE_POWER_SCALE_MAX;
			} else if (!strcmp(unit, "SCALE_MIN")) {
				pkt->unit = RTE_POWER_SCALE_MIN;
			} else if (!strcmp(unit, "ENABLE_TURBO")) {
				pkt->unit = RTE_POWER_ENABLE_TURBO;
			} else if (!strcmp(unit, "DISABLE_TURBO")) {
				pkt->unit = RTE_POWER_DISABLE_TURBO;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Invalid unit received in JSON\n");
				return -1;
			}
		} else {
			RTE_LOG(ERR, CHANNEL_MONITOR,
				"Unknown key received in JSON string: %s\n",
				key);
		}

		resource_id = get_resource_id_from_vmname(vm_name);
		if (resource_id < 0) {
			RTE_LOG(ERR, CHANNEL_MONITOR,
				"Could not get resource_id from vm_name:%s\n",
				vm_name);
			return -1;
		}
		strlcpy(pkt->vm_name, vm_name, RTE_POWER_VM_MAX_NAME_SZ);
		pkt->resource_id = resource_id;
	}
	return 0;
}
#endif

void channel_monitor_exit(void)
{
	run_loop = 0;
	rte_free(global_events_list);
}

static void
core_share(int pNo, int z, int x, int t)
{
	if (policies[pNo].core_share[z].pcpu == lvm_info[x].pcpus[t]) {
		if (strcmp(policies[pNo].pkt.vm_name,
				lvm_info[x].vm_name) != 0) {
			policies[pNo].core_share[z].status = 1;
			power_manager_scale_core_max(
					policies[pNo].core_share[z].pcpu);
		}
	}
}

static void
core_share_status(int pNo)
{
	int noVms = 0, noVcpus = 0, z, x, t;

	get_all_vm(&noVms, &noVcpus);

	/* Reset Core Share Status. */
	for (z = 0; z < noVcpus; z++)
		policies[pNo].core_share[z].status = 0;

	/* Foreach vcpu in a policy. */
	for (z = 0; z < policies[pNo].pkt.num_vcpu; z++) {
		/* Foreach VM on the platform. */
		for (x = 0; x < noVms; x++) {
			/* Foreach vcpu of VMs on platform. */
			for (t = 0; t < lvm_info[x].num_cpus; t++)
				core_share(pNo, z, x, t);
		}
	}
}

static int
pcpu_monitor(struct policy *pol, struct core_info *ci, int pcpu, int count)
{
	int ret = 0;

	if (pol->pkt.policy_to_use == RTE_POWER_POLICY_BRANCH_RATIO) {
		ci->cd[pcpu].oob_enabled = 1;
		ret = add_core_to_monitor(pcpu);
		if (ret == 0)
			RTE_LOG(INFO, CHANNEL_MONITOR,
					"Monitoring pcpu %d OOB for %s\n",
					pcpu, pol->pkt.vm_name);
		else
			RTE_LOG(ERR, CHANNEL_MONITOR,
					"Error monitoring pcpu %d OOB for %s\n",
					pcpu, pol->pkt.vm_name);

	} else {
		pol->core_share[count].pcpu = pcpu;
		RTE_LOG(INFO, CHANNEL_MONITOR,
				"Monitoring pcpu %d for %s\n",
				pcpu, pol->pkt.vm_name);
	}
	return ret;
}

static void
get_pcpu_to_control(struct policy *pol)
{
	/* Convert vcpu to pcpu. */
	struct vm_info info;
	int pcpu, count;
	struct core_info *ci;

	ci = get_core_info();

	RTE_LOG(DEBUG, CHANNEL_MONITOR,
			"Looking for pcpu for %s\n", pol->pkt.vm_name);

	/*
	 * So now that we're handling virtual and physical cores, we need to
	 * differentiate between them when adding them to the branch monitor.
	 * Virtual cores need to be converted to physical cores.
	 */
	if (pol->pkt.core_type == RTE_POWER_CORE_TYPE_VIRTUAL) {
		/*
		 * If the cores in the policy are virtual, we need to map them
		 * to physical core. We look up the vm info and use that for
		 * the mapping.
		 */
		get_info_vm(pol->pkt.vm_name, &info);
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			pcpu = info.pcpu_map[pol->pkt.vcpu_to_control[count]];
			pcpu_monitor(pol, ci, pcpu, count);
		}
	} else {
		/*
		 * If the cores in the policy are physical, we just use
		 * those core id's directly.
		 */
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			pcpu = pol->pkt.vcpu_to_control[count];
			pcpu_monitor(pol, ci, pcpu, count);
		}
	}
}

static int
get_pfid(struct policy *pol)
{
	int i, x, ret = 0;

	for (i = 0; i < pol->pkt.nb_mac_to_monitor; i++) {

		RTE_ETH_FOREACH_DEV(x) {
#ifdef RTE_NET_I40E
			ret = rte_pmd_i40e_query_vfid_by_mac(x,
				(struct rte_ether_addr *)&(pol->pkt.vfid[i]));
#else
			ret = -ENOTSUP;
#endif
			if (ret != -EINVAL) {
				pol->port[i] = x;
				break;
			}
		}
		if (ret == -EINVAL || ret == -ENOTSUP || ret == -ENODEV) {
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Error with Policy. MAC not found on "
				"attached ports\n");
			pol->enabled = 0;
			return ret;
		}
		pol->pfid[i] = ret;
	}
	return 1;
}

static int
update_policy(struct rte_power_channel_packet *pkt)
{
	unsigned int updated = 0;
	unsigned int i;

	RTE_LOG(INFO, CHANNEL_MONITOR,
			"Applying policy for %s\n", pkt->vm_name);

	for (i = 0; i < RTE_DIM(policies); i++) {
		if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
			/* Copy the contents of *pkt into the policy.pkt */
			policies[i].pkt = *pkt;
			get_pcpu_to_control(&policies[i]);
			/* Check Eth dev only for Traffic policy */
			if (policies[i].pkt.policy_to_use ==
					RTE_POWER_POLICY_TRAFFIC) {
				if (get_pfid(&policies[i]) < 0) {
					updated = 1;
					break;
				}
			}
			core_share_status(i);
			policies[i].enabled = 1;
			updated = 1;
		}
	}
	if (!updated) {
		for (i = 0; i < RTE_DIM(policies); i++) {
			if (policies[i].enabled == 0) {
				policies[i].pkt = *pkt;
				get_pcpu_to_control(&policies[i]);
				/* Check Eth dev only for Traffic policy */
				if (policies[i].pkt.policy_to_use ==
						RTE_POWER_POLICY_TRAFFIC) {
					if (get_pfid(&policies[i]) < 0) {
						updated = 1;
						break;
					}
				}
				core_share_status(i);
				policies[i].enabled = 1;
				break;
			}
		}
	}
	return 0;
}

static int
remove_policy(struct rte_power_channel_packet *pkt __rte_unused)
{
	unsigned int i;

	/*
	 * Disabling the policy is simply a case of setting
	 * enabled to 0
	 */
	for (i = 0; i < RTE_DIM(policies); i++) {
		if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
			policies[i].enabled = 0;
			return 0;
		}
	}
	return -1;
}

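/*
 * Note on the traffic estimate used below (the numbers here are illustrative
 * only): get_pkt_diff() scales the packet delta observed since the previous
 * call to a per-second rate using the TSC, i.e.
 *
 *   rate = (pkts_now - pkts_prev) * (tsc_hz / tsc_diff)
 *
 * For example, with a 2 GHz TSC, 10,000 new packets counted over
 * 1,000,000,000 TSC ticks (0.5 s) gives 10,000 * (2e9 / 1e9) = 20,000
 * packets/second, which apply_traffic_profile() then compares against the
 * policy thresholds.
 */
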
static uint64_t
get_pkt_diff(struct policy *pol)
{
	uint64_t vsi_pkt_count,
		vsi_pkt_total = 0,
		vsi_pkt_count_prev_total = 0;
	double rdtsc_curr, rdtsc_diff, diff;
	int x;
#ifdef RTE_NET_I40E
	struct rte_eth_stats vf_stats;
#endif

	for (x = 0; x < pol->pkt.nb_mac_to_monitor; x++) {

#ifdef RTE_NET_I40E
		/* Read vsi stats */
		if (rte_pmd_i40e_get_vf_stats(x, pol->pfid[x], &vf_stats) == 0)
			vsi_pkt_count = vf_stats.ipackets;
		else
			vsi_pkt_count = -1;
#else
		vsi_pkt_count = -1;
#endif

		vsi_pkt_total += vsi_pkt_count;

		vsi_pkt_count_prev_total += vsi_pkt_count_prev[pol->pfid[x]];
		vsi_pkt_count_prev[pol->pfid[x]] = vsi_pkt_count;
	}

	rdtsc_curr = rte_rdtsc_precise();
	rdtsc_diff = rdtsc_curr - rdtsc_prev[pol->pfid[x-1]];
	rdtsc_prev[pol->pfid[x-1]] = rdtsc_curr;

	diff = (vsi_pkt_total - vsi_pkt_count_prev_total) *
			((double)rte_get_tsc_hz() / rdtsc_diff);

	return diff;
}

static void
apply_traffic_profile(struct policy *pol)
{
	int count;
	uint64_t diff = 0;

	diff = get_pkt_diff(pol);

	if (diff >= (pol->pkt.traffic_policy.max_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_max(
						pol->core_share[count].pcpu);
		}
	} else if (diff >= (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_med(
						pol->core_share[count].pcpu);
		}
	} else if (diff < (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_min(
						pol->core_share[count].pcpu);
		}
	}
}

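/*
 * Worked example for apply_traffic_profile() above (illustrative values, not
 * defaults of this application): with avg_max_packet_thresh = 100000 and
 * max_max_packet_thresh = 500000, an estimated rate of 600000 pkts/s scales
 * the policy cores to max frequency, 300000 pkts/s scales them to medium,
 * and 50000 pkts/s scales them to min.  Cores whose core_share status is set
 * (shared with another VM) are skipped in all three cases.
 */
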
static void
apply_time_profile(struct policy *pol)
{
	int count, x;
	struct timeval tv;
	struct tm *ptm;
	char time_string[40];

	/* Obtain the time of day, and convert it to a tm struct. */
	gettimeofday(&tv, NULL);
	ptm = localtime(&tv.tv_sec);
	/* Format the date and time, down to a single second. */
	strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm);

	for (x = 0; x < RTE_POWER_HOURS_PER_DAY; x++) {

		if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) {
			for (count = 0; count < pol->pkt.num_vcpu; count++) {
				if (pol->core_share[count].status != 1) {
					power_manager_scale_core_max(
						pol->core_share[count].pcpu);
				}
			}
			break;
		} else if (ptm->tm_hour ==
				pol->pkt.timer_policy.quiet_hours[x]) {
			for (count = 0; count < pol->pkt.num_vcpu; count++) {
				if (pol->core_share[count].status != 1) {
					power_manager_scale_core_min(
						pol->core_share[count].pcpu);
				}
			}
			break;
		} else if (ptm->tm_hour ==
			pol->pkt.timer_policy.hours_to_use_traffic_profile[x]) {
			apply_traffic_profile(pol);
			break;
		}
	}
}

static void
apply_workload_profile(struct policy *pol)
{
	int count;

	if (pol->pkt.workload == RTE_POWER_WL_HIGH) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_max(
						pol->core_share[count].pcpu);
		}
	} else if (pol->pkt.workload == RTE_POWER_WL_MEDIUM) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_med(
						pol->core_share[count].pcpu);
		}
	} else if (pol->pkt.workload == RTE_POWER_WL_LOW) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_min(
						pol->core_share[count].pcpu);
		}
	}
}

static void
apply_policy(struct policy *pol)
{
	struct rte_power_channel_packet *pkt = &pol->pkt;

	/* Check policy to use */
	if (pkt->policy_to_use == RTE_POWER_POLICY_TRAFFIC)
		apply_traffic_profile(pol);
	else if (pkt->policy_to_use == RTE_POWER_POLICY_TIME)
		apply_time_profile(pol);
	else if (pkt->policy_to_use == RTE_POWER_POLICY_WORKLOAD)
		apply_workload_profile(pol);
}

static int
write_binary_packet(void *buffer,
		size_t buffer_len,
		struct channel_info *chan_info)
{
	int ret;

	if (buffer_len == 0 || buffer == NULL)
		return -1;

	if (chan_info->fd < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Channel is not connected\n");
		return -1;
	}

	while (buffer_len > 0) {
		ret = write(chan_info->fd, buffer, buffer_len);
		if (ret == -1) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, CHANNEL_MONITOR, "Write function failed due to %s.\n",
					strerror(errno));
			return -1;
		}
		buffer = (char *)buffer + ret;
		buffer_len -= ret;
	}
	return 0;
}

static int
send_freq(struct rte_power_channel_packet *pkt,
		struct channel_info *chan_info,
		bool freq_list)
{
	unsigned int vcore_id = pkt->resource_id;
	struct rte_power_channel_packet_freq_list channel_pkt_freq_list;
	struct vm_info info;

	if (get_info_vm(pkt->vm_name, &info) != 0)
		return -1;

	if (!freq_list && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM)
		return -1;

	if (!info.allow_query)
		return -1;

	channel_pkt_freq_list.command = RTE_POWER_FREQ_LIST;
	channel_pkt_freq_list.num_vcpu = info.num_vcpus;

	if (freq_list) {
		unsigned int i;
		for (i = 0; i < info.num_vcpus; i++)
			channel_pkt_freq_list.freq_list[i] =
				power_manager_get_current_frequency(info.pcpu_map[i]);
	} else {
		channel_pkt_freq_list.freq_list[vcore_id] =
			power_manager_get_current_frequency(info.pcpu_map[vcore_id]);
	}

	return write_binary_packet(&channel_pkt_freq_list,
			sizeof(channel_pkt_freq_list),
			chan_info);
}

static int
send_capabilities(struct rte_power_channel_packet *pkt,
		struct channel_info *chan_info,
		bool list_requested)
{
	unsigned int vcore_id = pkt->resource_id;
	struct rte_power_channel_packet_caps_list channel_pkt_caps_list;
	struct vm_info info;
	struct rte_power_core_capabilities caps;
	int ret;

	if (get_info_vm(pkt->vm_name, &info) != 0)
		return -1;

	if (!list_requested && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM)
		return -1;

	if (!info.allow_query)
		return -1;

	channel_pkt_caps_list.command = RTE_POWER_CAPS_LIST;
	channel_pkt_caps_list.num_vcpu = info.num_vcpus;

	if (list_requested) {
		unsigned int i;
		for (i = 0; i < info.num_vcpus; i++) {
			ret = rte_power_get_capabilities(info.pcpu_map[i],
					&caps);
			if (ret == 0) {
				channel_pkt_caps_list.turbo[i] =
						caps.turbo;
				channel_pkt_caps_list.priority[i] =
						caps.priority;
			} else
				return -1;
		}
	} else {
		ret = rte_power_get_capabilities(info.pcpu_map[vcore_id],
				&caps);
		if (ret == 0) {
			channel_pkt_caps_list.turbo[vcore_id] =
					caps.turbo;
			channel_pkt_caps_list.priority[vcore_id] =
					caps.priority;
		} else
			return -1;
	}

	return write_binary_packet(&channel_pkt_caps_list,
			sizeof(channel_pkt_caps_list),
			chan_info);
}

static int
send_ack_for_received_cmd(struct rte_power_channel_packet *pkt,
		struct channel_info *chan_info,
		uint32_t command)
{
	pkt->command = command;
	return write_binary_packet(pkt,
			sizeof(*pkt),
			chan_info);
}

static int
process_request(struct rte_power_channel_packet *pkt,
		struct channel_info *chan_info)
{
	int ret;

	if (chan_info == NULL)
		return -1;

	if (rte_atomic32_cmpset(&(chan_info->status),
			CHANNEL_MGR_CHANNEL_CONNECTED,
			CHANNEL_MGR_CHANNEL_PROCESSING) == 0)
		return -1;

	if (pkt->command == RTE_POWER_CPU_POWER) {
		unsigned int core_num;

		if (pkt->core_type == RTE_POWER_CORE_TYPE_VIRTUAL)
			core_num = get_pcpu(chan_info, pkt->resource_id);
		else
			core_num = pkt->resource_id;

		RTE_LOG(DEBUG, CHANNEL_MONITOR, "Processing requested cmd for cpu:%d\n",
			core_num);

		int scale_res;
		bool valid_unit = true;

		switch (pkt->unit) {
		case(RTE_POWER_SCALE_MIN):
			scale_res = power_manager_scale_core_min(core_num);
			break;
		case(RTE_POWER_SCALE_MAX):
			scale_res = power_manager_scale_core_max(core_num);
			break;
		case(RTE_POWER_SCALE_DOWN):
			scale_res = power_manager_scale_core_down(core_num);
			break;
		case(RTE_POWER_SCALE_UP):
			scale_res = power_manager_scale_core_up(core_num);
			break;
		case(RTE_POWER_ENABLE_TURBO):
			scale_res = power_manager_enable_turbo_core(core_num);
			break;
		case(RTE_POWER_DISABLE_TURBO):
			scale_res = power_manager_disable_turbo_core(core_num);
			break;
		default:
			valid_unit = false;
			break;
		}

		if (valid_unit) {
			ret = send_ack_for_received_cmd(pkt,
					chan_info,
					scale_res >= 0 ?
						RTE_POWER_CMD_ACK :
						RTE_POWER_CMD_NACK);
			if (ret < 0)
				RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n");
		} else
			RTE_LOG(ERR, CHANNEL_MONITOR, "Unexpected unit type.\n");

	}

	if (pkt->command == RTE_POWER_PKT_POLICY) {
		RTE_LOG(INFO, CHANNEL_MONITOR, "Processing policy request %s\n",
				pkt->vm_name);
		int ret = send_ack_for_received_cmd(pkt,
				chan_info,
				RTE_POWER_CMD_ACK);
		if (ret < 0)
			RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n");
		update_policy(pkt);
		policy_is_set = 1;
	}

	if (pkt->command == RTE_POWER_PKT_POLICY_REMOVE) {
		ret = remove_policy(pkt);
		if (ret == 0)
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Removed policy %s\n", pkt->vm_name);
		else
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Policy %s does not exist\n", pkt->vm_name);
	}

	if (pkt->command == RTE_POWER_QUERY_FREQ_LIST ||
		pkt->command == RTE_POWER_QUERY_FREQ) {

		RTE_LOG(INFO, CHANNEL_MONITOR,
			"Frequency for %s requested.\n", pkt->vm_name);
		int ret = send_freq(pkt,
				chan_info,
				pkt->command == RTE_POWER_QUERY_FREQ_LIST);
		if (ret < 0)
			RTE_LOG(ERR, CHANNEL_MONITOR, "Error during frequency sending.\n");
	}

	if (pkt->command == RTE_POWER_QUERY_CAPS_LIST ||
		pkt->command == RTE_POWER_QUERY_CAPS) {

		RTE_LOG(INFO, CHANNEL_MONITOR,
			"Capabilities for %s requested.\n", pkt->vm_name);
		int ret = send_capabilities(pkt,
				chan_info,
				pkt->command == RTE_POWER_QUERY_CAPS_LIST);
		if (ret < 0)
			RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending capabilities.\n");
	}

	/*
	 * Return is not checked as channel status may have been set to DISABLED
	 * from management thread
	 */
	rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_PROCESSING,
			CHANNEL_MGR_CHANNEL_CONNECTED);
	return 0;
}

int
add_channel_to_monitor(struct channel_info **chan_info)
{
	struct channel_info *info = *chan_info;
	struct epoll_event event;

	event.events = EPOLLIN;
	event.data.ptr = info;
	if (epoll_ctl(global_event_fd, EPOLL_CTL_ADD, info->fd, &event) < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to add channel '%s' "
				"to epoll\n", info->channel_path);
		return -1;
	}
	RTE_LOG(INFO, CHANNEL_MONITOR, "Added channel '%s' "
			"to monitor\n", info->channel_path);
	return 0;
}

int
remove_channel_from_monitor(struct channel_info *chan_info)
{
	if (epoll_ctl(global_event_fd, EPOLL_CTL_DEL,
			chan_info->fd, NULL) < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to remove channel '%s' "
				"from epoll\n", chan_info->channel_path);
		return -1;
	}
	return 0;
}

int
channel_monitor_init(void)
{
	global_event_fd = epoll_create1(0);
	if (global_event_fd < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR,
				"Error creating epoll context with error %s\n",
				strerror(errno));
		return -1;
	}
	global_events_list = rte_malloc("epoll_events",
			sizeof(*global_events_list)
			* MAX_EVENTS, RTE_CACHE_LINE_SIZE);
	if (global_events_list == NULL) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to rte_malloc for "
				"epoll events\n");
		return -1;
	}
	return 0;
}

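/*
 * Binary channels carry fixed-size struct rte_power_channel_packet messages,
 * so read_binary_packet() below keeps reading until sizeof(pkt) bytes have
 * arrived (or the read fails).  A writer on the other end of the channel is
 * expected to send one complete packet per request; illustrative sketch only,
 * assuming a hypothetical connected channel descriptor chan_fd:
 *
 *   struct rte_power_channel_packet pkt;
 *   memset(&pkt, 0, sizeof(pkt));
 *   pkt.command = RTE_POWER_CPU_POWER;
 *   pkt.unit = RTE_POWER_SCALE_UP;
 *   pkt.resource_id = 2;                // core to scale
 *   write(chan_fd, &pkt, sizeof(pkt));
 */
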
static void
read_binary_packet(struct channel_info *chan_info)
{
	struct rte_power_channel_packet pkt;
	void *buffer = &pkt;
	int buffer_len = sizeof(pkt);
	int n_bytes, err = 0;

	while (buffer_len > 0) {
		n_bytes = read(chan_info->fd,
				buffer, buffer_len);
		if (n_bytes == buffer_len)
			break;
		if (n_bytes < 0) {
			err = errno;
			RTE_LOG(DEBUG, CHANNEL_MONITOR,
				"Received error on "
				"channel '%s' read: %s\n",
				chan_info->channel_path,
				strerror(err));
			remove_channel(&chan_info);
			break;
		}
		buffer = (char *)buffer + n_bytes;
		buffer_len -= n_bytes;
	}
	if (!err)
		process_request(&pkt, chan_info);
}

#ifdef USE_JANSSON
static void
read_json_packet(struct channel_info *chan_info)
{
	struct rte_power_channel_packet pkt;
	int n_bytes, ret;
	json_t *root;
	json_error_t error;
	const char *resource_name;
	char *start, *end;
	uint32_t n;

	/* read opening brace to closing brace */
	do {
		int idx = 0;
		int indent = 0;
		do {
			n_bytes = read(chan_info->fd, &json_data[idx], 1);
			if (n_bytes == 0)
				break;
			if (json_data[idx] == '{')
				indent++;
			if (json_data[idx] == '}')
				indent--;
			if ((indent > 0) || (idx > 0))
				idx++;
			if (indent <= 0)
				json_data[idx] = 0;
			if (idx >= MAX_JSON_STRING_LEN-1)
				break;
		} while (indent > 0);

		json_data[idx] = '\0';

		if (strlen(json_data) == 0)
			continue;

		printf("got [%s]\n", json_data);

		root = json_loads(json_data, 0, &error);

		if (root) {
			resource_name = get_resource_name_from_chn_path(
					chan_info->channel_path);
			/*
			 * Because our data is now in the json
			 * object, we can overwrite the pkt
			 * with a rte_power_channel_packet struct, using
			 * parse_json_to_pkt()
			 */
			ret = parse_json_to_pkt(root, &pkt, resource_name);
			json_decref(root);
			if (ret) {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Error validating JSON profile data\n");
				break;
			}
			start = strstr(pkt.vm_name,
					CHANNEL_MGR_FIFO_PATTERN_NAME);
			if (start != NULL) {
				/* move past pattern to start of fifo id */
				start += strlen(CHANNEL_MGR_FIFO_PATTERN_NAME);

				end = start;
				n = (uint32_t)strtoul(start, &end, 10);

				if (end[0] == '\0') {
					/* Add core id to core list */
					pkt.num_vcpu = 1;
					pkt.vcpu_to_control[0] = n;
					process_request(&pkt, chan_info);
				} else {
					RTE_LOG(ERR, CHANNEL_MONITOR,
						"Cannot extract core id from fifo name\n");
				}
			} else {
				process_request(&pkt, chan_info);
			}
		} else {
			RTE_LOG(ERR, CHANNEL_MONITOR,
				"JSON error on line %d: %s\n",
				error.line, error.text);
		}
	} while (n_bytes > 0);
}
#endif

void
run_channel_monitor(void)
{
	while (run_loop) {
		int n_events, i;

		n_events = epoll_wait(global_event_fd, global_events_list,
				MAX_EVENTS, 1);
		if (!run_loop)
			break;
		for (i = 0; i < n_events; i++) {
			struct channel_info *chan_info = (struct channel_info *)
					global_events_list[i].data.ptr;
			if ((global_events_list[i].events & EPOLLERR) ||
				(global_events_list[i].events & EPOLLHUP)) {
				RTE_LOG(INFO, CHANNEL_MONITOR,
						"Remote closed connection for "
						"channel '%s'\n",
						chan_info->channel_path);
				remove_channel(&chan_info);
				continue;
			}
			if (global_events_list[i].events & EPOLLIN) {

				switch (chan_info->type) {
				case CHANNEL_TYPE_BINARY:
					read_binary_packet(chan_info);
					break;
#ifdef USE_JANSSON
				case CHANNEL_TYPE_JSON:
					read_json_packet(chan_info);
					break;
#endif
				default:
					break;
				}
			}
		}
		rte_delay_us(time_period_ms*1000);
		if (policy_is_set) {
			unsigned int j;

			for (j = 0; j < RTE_DIM(policies); j++) {
				if (policies[j].enabled == 1)
					apply_policy(&policies[j]);
			}
		}
	}
}