// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

/*
 * User-visible (uabi) engine registry.
 *
 * Engines are staged on a lock-free llist during driver init, then sorted
 * and published into an rbtree keyed on (uabi_class, uabi_instance) so
 * userspace lookups can binary-search them.
 *
 * NOTE(review): the casts in this file between llist_node, list_head and
 * rb_node all target the same engine->uabi_node storage — each container
 * type is used in a different phase (staging / sorting / published).
 * This relies on uabi_node being sized and aligned for all three; confirm
 * against its declaration in intel_engine_types.h.
 */

#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/llist.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "uc/intel_guc_submission.h"

/*
 * intel_engine_lookup_user - find the engine for a userspace-visible key
 * @i915: the device owning the published rbtree
 * @class: uabi engine class (I915_ENGINE_CLASS_*)
 * @instance: uabi instance within that class
 *
 * Walks the rbtree comparing the composite (class, instance) key
 * lexicographically: class first, then instance within an equal class.
 * Returns the matching engine, or NULL if none is registered.
 */
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	struct rb_node *p = i915->uabi_engines.rb_node;

	while (p) {
		struct intel_engine_cs *it =
			rb_entry(p, typeof(*it), uabi_node);

		if (class < it->uabi_class)
			p = p->rb_left;
		else if (class > it->uabi_class ||
			 instance > it->uabi_instance)
			p = p->rb_right; /* greater class, or same class + greater instance */
		else if (instance < it->uabi_instance)
			p = p->rb_left;
		else
			return it; /* exact (class, instance) match */
	}

	return NULL;
}

/*
 * intel_engine_add_user - stage a freshly initialised engine
 *
 * Pushes the engine onto the lock-free llist; it only becomes visible to
 * userspace once intel_engines_driver_register() migrates it into the
 * rbtree. The casts reinterpret uabi_node / uabi_engines as their llist
 * counterparts for this staging phase (see file-top note).
 */
void intel_engine_add_user(struct intel_engine_cs *engine)
{
	llist_add((struct llist_node *)&engine->uabi_node,
		  (struct llist_head *)&engine->i915->uabi_engines);
}

/* Map internal hardware engine class -> uabi class exposed to userspace. */
static const u8 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
};

/*
 * list_sort() comparator: order engines by uabi class, then by hardware
 * instance. Note uabi_instance has not been assigned yet at sort time
 * (that happens in intel_engines_driver_register()), so the hardware
 * instance provides the ordering within a class. The list_head pointers
 * are really &engine->uabi_node, hence the rb_node container_of.
 */
static int engine_cmp(void *priv, const struct list_head *A,
		      const struct list_head *B)
{
	const struct intel_engine_cs *a =
		container_of((struct rb_node *)A, typeof(*a), uabi_node);
	const struct intel_engine_cs *b =
		container_of((struct rb_node *)B, typeof(*b), uabi_node);

	if (uabi_classes[a->class] < uabi_classes[b->class])
		return -1;
	if (uabi_classes[a->class] > uabi_classes[b->class])
		return 1;

	if (a->instance < b->instance)
		return -1;
	if (a->instance > b->instance)
		return 1;

	return 0;
}

/* Atomically detach and return the entire staged llist of engines. */
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
	return llist_del_all((struct llist_head *)&i915->uabi_engines);
}

/*
 * Drain the staged llist into @engines (a regular list_head) and sort it
 * with engine_cmp(). list_add() reverses the llist order, but the
 * subsequent list_sort() imposes the final ordering regardless.
 */
static void sort_engines(struct drm_i915_private *i915,
			 struct list_head *engines)
{
	struct llist_node *pos, *next;

	llist_for_each_safe(pos, next, get_engines(i915)) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)pos, typeof(*engine),
				     uabi_node);
		list_add((struct list_head *)&engine->uabi_node, engines);
	}
	list_sort(NULL, engines, engine_cmp);
}

#ifdef __linux__
/*
 * Compute the device-wide scheduler capability mask exposed via
 * i915->caps.scheduler. A capability is only advertised if every uabi
 * engine supports it: caps any engine lacks are accumulated in @disabled
 * and masked out at the end. If the result loses CAP_ENABLED, all caps
 * are dropped. This variant packs engine-flag/cap pairs as bit indices
 * (hence the ilog2()/BIT() round trip).
 */
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->sched_engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	/* Only advertise caps supported by every engine. */
	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
#else
/* without the pointless ilog2 -> BIT() */
/*
 * Non-Linux variant of the above: same all-engines-must-agree logic, but
 * the map[] stores the flag/cap mask values directly instead of their bit
 * indices, so they are tested and OR'ed in without BIT().
 */
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { I915_ENGINE_##x, I915_SCHEDULER_CAP_##y }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->sched_engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & map[i].engine)
				enabled |= map[i].sched;
			else
				disabled |= map[i].sched;
		}
	}

	/* Only advertise caps supported by every engine. */
	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
#endif

/*
 * intel_engine_class_repr - short user-facing name prefix for a class
 *
 * Returns e.g. "rcs" for the render class; "xxx" for any class that is
 * out of range or has no uabi name.
 */
const char *intel_engine_class_repr(u8 class)
{
	static const char * const uabi_names[] = {
		[RENDER_CLASS] = "rcs",
		[COPY_ENGINE_CLASS] = "bcs",
		[VIDEO_DECODE_CLASS] = "vcs",
		[VIDEO_ENHANCEMENT_CLASS] = "vecs",
		[COMPUTE_CLASS] = "ccs",
	};

	if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
		return "xxx";

	return uabi_names[class];
}

/* Running state used to assign legacy execbuf ring indices per (gt, class). */
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};

/*
 * Translate a (class, instance) pair into the legacy global engine index
 * (RCS0, BCS0, VCS0.., ...) used by the historical execbuf ring ABI.
 * Returns INVALID_ENGINE for classes/instances outside the legacy map.
 */
static int legacy_ring_idx(const struct legacy_ring *ring)
{
	static const struct {
		u8 base, max;
	} map[] = {
		[RENDER_CLASS] = { RCS0, 1 },
		[COPY_ENGINE_CLASS] = { BCS0, 1 },
		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
		[COMPUTE_CLASS] = { CCS0, I915_MAX_CCS },
	};

	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
		return INVALID_ENGINE;

	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
		return INVALID_ENGINE;

	return map[ring->class].base + ring->instance;
}

/*
 * Assign @engine its legacy ring index, restarting the per-class instance
 * counter whenever the (gt, class) pair changes. Relies on the caller
 * iterating engines in sorted order so same-class engines are contiguous.
 */
static void add_legacy_ring(struct legacy_ring *ring,
			    struct intel_engine_cs *engine)
{
	if (engine->gt != ring->gt || engine->class != ring->class) {
		ring->gt = engine->gt;
		ring->class = engine->class;
		ring->instance = 0;
	}

	engine->legacy_idx = legacy_ring_idx(ring);
	if (engine->legacy_idx != INVALID_ENGINE)
		ring->instance++;
}

/*
 * intel_engines_driver_register - publish the uabi engine list
 *
 * Drains and sorts the staged engines, then for each usable engine:
 * assigns its uabi class/instance, renames it to the user-facing
 * "<class><instance>" form, inserts it into the rbtree, and computes its
 * legacy execbuf ring index. Engines on a wedged/unusable gt are skipped
 * (left out of the tree). Finishes by deriving the scheduler caps.
 *
 * Because the input list is sorted, every new node is the rightmost in
 * the tree: @prev/@p track the last inserted node and its rb_right slot,
 * giving O(1) linkage per insert (rb_insert_color rebalances).
 *
 * Under DEBUG_GEM selftests, the published tree is cross-checked against
 * the per-class counts and context-isolation state; on any mismatch the
 * whole uabi tree is discarded (no engines exposed).
 */
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	struct legacy_ring ring = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	DRM_LIST_HEAD(engines);

	sort_engines(i915, &engines);

	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);
		char old[sizeof(engine->name)];

		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

		/* uabi_instance counts up within each uabi class, in sorted order. */
		GEM_BUG_ON(engine->uabi_class >=
			   ARRAY_SIZE(i915->engine_uabi_class_count));
		engine->uabi_instance =
			i915->engine_uabi_class_count[engine->uabi_class]++;

		/* Replace the internal name with the final user facing name */
		memcpy(old, engine->name, sizeof(engine->name));
		scnprintf(engine->name, sizeof(engine->name), "%s%u",
			  intel_engine_class_repr(engine->class),
			  engine->uabi_instance);
		DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);

		/* Sorted input => always link as rightmost node of the tree. */
		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		/* Every counted (class, instance) must resolve via lookup. */
		for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
			for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		/* On any inconsistency, expose no engines rather than bad ones. */
		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}

/*
 * intel_engines_has_context_isolation - per-class default-state bitmask
 *
 * Returns a mask with BIT(uabi_class) set for every class that has at
 * least one engine with a saved default context state.
 */
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int which;

	which = 0;
	for_each_uabi_engine(engine, i915)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}