/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <unistd.h>
#include <string.h>
#include <stdio.h>

#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>

#include "mlx5_common.h"
#include "mlx5_common_os.h"
#include "mlx5_common_utils.h"
#include "mlx5_malloc.h"
#include "mlx5_common_pci.h"

int mlx5_common_logtype;

uint8_t haswell_broadwell_cpu;

/* On x86_64 Intel processors, check whether relaxed ordering
 * should be used.
 */
#ifdef RTE_ARCH_X86_64
/**
 * This function returns processor identification and feature information
 * into the registers.
 *
 * @param level
 *   The main category of information returned.
 * @param eax, ebx, ecx, edx
 *   Pointers to the registers that will hold the CPU information.
 */
static inline void mlx5_cpu_id(unsigned int level,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid\n\t"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (level));
}
#endif

RTE_INIT_PRIO(mlx5_log_init, LOG)
{
	mlx5_common_logtype = rte_log_register("pmd.common.mlx5");
	if (mlx5_common_logtype >= 0)
		rte_log_set_level(mlx5_common_logtype, RTE_LOG_NOTICE);
}

static bool mlx5_common_initialized;

/**
 * One-time initialization routine for the run-time dependency on the glue
 * library, shared by multiple PMDs. Each mlx5 PMD that depends on the
 * mlx5_common module must invoke it in its constructor.
 */
void
mlx5_common_init(void)
{
	if (mlx5_common_initialized)
		return;

	mlx5_glue_constructor();
	mlx5_common_pci_init();
	mlx5_common_initialized = true;
}
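
/*
 * Usage sketch (illustrative only, not part of this file): an mlx5 PMD
 * would typically call mlx5_common_init() from its own constructor, e.g.
 * (the constructor name below is hypothetical):
 *
 *	RTE_INIT_PRIO(mlx5_pmd_init, CLASS)
 *	{
 *		mlx5_common_init();
 *	}
 */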

/**
 * This function is responsible for initializing the variable
 * haswell_broadwell_cpu by checking if the CPU is Intel
 * and reading the data returned from mlx5_cpu_id().
 * Since Haswell and Broadwell CPUs do not gain performance
 * from relaxed ordering, the CPU type is checked
 * before deciding whether to enable RO or not.
 * If the CPU is Haswell or Broadwell the variable is set to 1,
 * otherwise it is set to 0.
 */
RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
{
#ifdef RTE_ARCH_X86_64
	unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
	unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
	unsigned int i, model, family, brand_id, vendor;
	unsigned int signature_intel_ebx = 0x756e6547;
	unsigned int extended_model;
	unsigned int eax = 0;
	unsigned int ebx = 0;
	unsigned int ecx = 0;
	unsigned int edx = 0;
	int max_level;

	mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
	vendor = ebx;
	max_level = eax;
	if (max_level < 1) {
		haswell_broadwell_cpu = 0;
		return;
	}
	mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
	model = (eax >> 4) & 0x0f;
	family = (eax >> 8) & 0x0f;
	brand_id = ebx & 0xff;
	extended_model = (eax >> 12) & 0xf0;
	/* Check if the processor is Haswell or Broadwell. */
	if (vendor == signature_intel_ebx) {
		if (family == 0x06)
			model += extended_model;
		if (brand_id == 0 && family == 0x6) {
			for (i = 0; i < RTE_DIM(broadwell_models); i++)
				if (model == broadwell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
			for (i = 0; i < RTE_DIM(haswell_models); i++)
				if (model == haswell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
		}
	}
#endif
	haswell_broadwell_cpu = 0;
}
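
/*
 * Worked example of the decoding above (signature value assumed for
 * illustration): a Broadwell-U CPU reports EAX = 0x000306d4 for CPUID
 * leaf 1, so model = (eax >> 4) & 0x0f = 0x0d, family = (eax >> 8) &
 * 0x0f = 0x06, extended_model = (eax >> 12) & 0xf0 = 0x30, and the
 * effective model becomes 0x0d + 0x30 = 0x3d, matching broadwell_models[0].
 */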

/**
 * Allocate page of door-bells and register it using DevX API.
 *
 * @param [in] ctx
 *   Pointer to the device context.
 *
 * @return
 *   Pointer to new page on success, NULL otherwise.
 */
static struct mlx5_devx_dbr_page *
mlx5_alloc_dbr_page(void *ctx)
{
	struct mlx5_devx_dbr_page *page;

	/* Allocate space for door-bell page and management data. */
	page = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			   sizeof(struct mlx5_devx_dbr_page),
			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!page) {
		DRV_LOG(ERR, "cannot allocate dbr page");
		return NULL;
	}
	/* Register allocated memory. */
	page->umem = mlx5_glue->devx_umem_reg(ctx, page->dbrs,
					      MLX5_DBR_PAGE_SIZE, 0);
	if (!page->umem) {
		DRV_LOG(ERR, "cannot umem reg dbr page");
		mlx5_free(page);
		return NULL;
	}
	return page;
}

/**
 * Find the next available door-bell, allocate new page if needed.
 *
 * @param [in] ctx
 *   Pointer to device context.
 * @param [in] head
 *   Pointer to the head of dbr pages list.
 * @param [out] dbr_page
 *   Door-bell page containing the allocated door-bell record.
 *
 * @return
 *   Door-bell address offset on success, a negative error value otherwise.
 */
int64_t
mlx5_get_dbr(void *ctx, struct mlx5_dbr_page_list *head,
	     struct mlx5_devx_dbr_page **dbr_page)
{
	struct mlx5_devx_dbr_page *page = NULL;
	uint32_t i, j;

	LIST_FOREACH(page, head, next)
		if (page->dbr_count < MLX5_DBR_PER_PAGE)
			break;
	if (!page) { /* No page with free door-bell exists. */
		page = mlx5_alloc_dbr_page(ctx);
		if (!page) /* Failed to allocate new page. */
			return (-1);
		LIST_INSERT_HEAD(head, page, next);
	}
	/* Loop to find bitmap part with clear bit. */
	for (i = 0;
	     i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX;
	     i++)
		; /* Empty. */
	/* Find the first clear bit. */
	MLX5_ASSERT(i < MLX5_DBR_BITMAP_SIZE);
	j = rte_bsf64(~page->dbr_bitmap[i]);
	page->dbr_bitmap[i] |= (UINT64_C(1) << j);
	page->dbr_count++;
	*dbr_page = page;
	return (i * CHAR_BIT * sizeof(uint64_t) + j) * MLX5_DBR_SIZE;
}
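
/*
 * Illustrative pairing of the allocation routine above with the release
 * routine below (the dbr_pages list head is a hypothetical name):
 *
 *	struct mlx5_devx_dbr_page *dbr_page;
 *	int64_t offset = mlx5_get_dbr(ctx, &dbr_pages, &dbr_page);
 *
 *	if (offset >= 0) {
 *		... use the door-bell record at dbr_page->dbrs + offset ...
 *		mlx5_release_dbr(&dbr_pages,
 *				 mlx5_os_get_umem_id(dbr_page->umem),
 *				 offset);
 *	}
 */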

/**
 * Release a door-bell record.
 *
 * @param [in] head
 *   Pointer to the head of dbr pages list.
 * @param [in] umem_id
 *   UMEM ID of page containing the door-bell record to release.
 * @param [in] offset
 *   Offset of door-bell record in page.
 *
 * @return
 *   0 on success, a negative error value otherwise.
 */
int32_t
mlx5_release_dbr(struct mlx5_dbr_page_list *head, uint32_t umem_id,
		 uint64_t offset)
{
	struct mlx5_devx_dbr_page *page = NULL;
	int ret = 0;

	LIST_FOREACH(page, head, next)
		/* Find the page this address belongs to. */
		if (mlx5_os_get_umem_id(page->umem) == umem_id)
			break;
	if (!page)
		return -EINVAL;
	page->dbr_count--;
	if (!page->dbr_count) {
		/* Page not used, free it and remove from list. */
		LIST_REMOVE(page, next);
		if (page->umem)
			ret = -mlx5_glue->devx_umem_dereg(page->umem);
		mlx5_free(page);
	} else {
		/* Mark in bitmap that this door-bell is not in use. */
		offset /= MLX5_DBR_SIZE;
		int i = offset / 64;
		int j = offset % 64;

		page->dbr_bitmap[i] &= ~(UINT64_C(1) << j);
	}
	return ret;
}
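
/*
 * Worked example of the offset arithmetic (assuming MLX5_DBR_SIZE is 8):
 * mlx5_get_dbr() encodes bitmap position i = 1, j = 1 as the byte offset
 * (1 * 64 + 1) * 8 = 520, and mlx5_release_dbr() decodes 520 back via
 * 520 / 8 = 65, then i = 65 / 64 = 1 and j = 65 % 64 = 1.
 */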

/**
 * Allocate the User Access Region with DevX on specified device.
 *
 * @param [in] ctx
 *   Infiniband device context to perform allocation on.
 * @param [in] mapping
 *   MLX5DV_UAR_ALLOC_TYPE_BF - allocate as cached memory with write-combining
 *				attributes (if supported by the host), the
 *				writes to the UAR registers must be followed
 *				by a write memory barrier.
 *   MLX5DV_UAR_ALLOC_TYPE_NC - allocate as non-cached memory, all writes are
 *				promoted to the registers immediately, no
 *				memory barriers needed.
 *   mapping < 0 - the first attempt is performed with
 *		   MLX5DV_UAR_ALLOC_TYPE_NC, if this fails the next attempt
 *		   with MLX5DV_UAR_ALLOC_TYPE_BF is performed. The drivers
 *		   specifying negative values should always provide the write
 *		   memory barrier operation after UAR register writings.
 * If there are no definitions for the MLX5DV_UAR_ALLOC_TYPE_xx (older rdma
 * library headers), the caller can specify 0.
 *
 * @return
 *   UAR object pointer on success, NULL otherwise and rte_errno is set.
 */
void *
mlx5_devx_alloc_uar(void *ctx, int mapping)
{
	void *uar;
	uint32_t retry, uar_mapping;
	void *base_addr;

	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		/* Control the mapping type according to the settings. */
		uar_mapping = (mapping < 0) ?
			      MLX5DV_UAR_ALLOC_TYPE_NC : mapping;
#else
		/*
		 * It seems we have no way to control the memory mapping type
		 * for the UAR, the default "Write-Combining" type is assumed.
		 */
		uar_mapping = 0;
		RTE_SET_USED(mapping);
#endif
		uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		if (!uar &&
		    mapping < 0 &&
		    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
			/*
			 * In some environments like virtual machines the
			 * Write-Combining mapping might not be supported
			 * and UAR allocation fails. Try the "Non-Cached"
			 * mapping for this case.
			 */
			DRV_LOG(WARNING, "Failed to allocate DevX UAR (BF)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
			uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
		} else if (!uar &&
			   mapping < 0 &&
			   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
			/*
			 * If Verbs/kernel does not support the "Non-Cached"
			 * mapping, try "Write-Combining".
			 */
			DRV_LOG(WARNING, "Failed to allocate DevX UAR (NC)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
			uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
		}
#endif
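		/*
		 * At this point a NULL uar means the allocation failed for
		 * every mapping type attempted in this iteration.
		 */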
		if (!uar) {
			DRV_LOG(ERR, "Failed to allocate DevX UAR (BF/NC)");
			rte_errno = ENOMEM;
			goto exit;
		}
		base_addr = mlx5_os_get_devx_uar_base_addr(uar);
		if (base_addr)
			break;
		/*
		 * The UARs are allocated by rdma_core within the
		 * IB device context, on context closure all UARs
		 * will be freed, so there should be no memory/object
		 * leakage.
		 */
		DRV_LOG(WARNING, "Retrying to allocate DevX UAR");
		uar = NULL;
	}
	/* Check whether we finally succeeded with valid UAR allocation. */
	if (!uar) {
		DRV_LOG(ERR, "Failed to allocate DevX UAR (NULL base)");
		rte_errno = ENOMEM;
	}
	/*
	 * Returning void * instead of struct mlx5dv_devx_uar *
	 * is for compatibility with older rdma-core library headers.
	 */
exit:
	return uar;
}
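
/*
 * Usage sketch (illustrative, not part of this file): a driver that lets
 * this routine pick the mapping type passes a negative mapping and always
 * issues a write memory barrier after writing the UAR registers:
 *
 *	void *uar = mlx5_devx_alloc_uar(ctx, -1);
 *
 *	if (uar != NULL) {
 *		void *reg = mlx5_os_get_devx_uar_base_addr(uar);
 *		... write door-bell data to reg ...
 *		rte_wmb();
 *	}
 */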