// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/llist.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "uc/intel_guc_submission.h"

/**
 * intel_engine_lookup_user - find an engine by its uABI (class, instance) pair
 * @i915: the device private holding the uabi_engines rb-tree
 * @class: uABI engine class (I915_ENGINE_CLASS_*)
 * @instance: uABI instance number within @class
 *
 * Walks the rb-tree rooted at i915->uabi_engines, which is keyed by
 * (uabi_class, uabi_instance) in lexicographic order (built by
 * intel_engines_driver_register() from the sorted engine list).
 *
 * Returns the matching engine, or NULL if no engine has that uABI id.
 */
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	struct rb_node *p = i915->uabi_engines.rb_node;

	while (p) {
		struct intel_engine_cs *it =
			rb_entry(p, typeof(*it), uabi_node);

		if (class < it->uabi_class)
			p = p->rb_left;
		else if (class > it->uabi_class ||
			 instance > it->uabi_instance)
			/*
			 * Here class >= it->uabi_class, so descending right
			 * is correct both for a strictly greater class and
			 * for an equal class with a greater instance.
			 */
			p = p->rb_right;
		else if (instance < it->uabi_instance)
			p = p->rb_left;
		else
			return it;
	}

	return NULL;
}

/**
 * intel_engine_add_user - queue a newly initialised engine for uABI exposure
 * @engine: the engine to publish
 *
 * During early init the engines are collected on a lock-free llist; the
 * same storage (engine->uabi_node and i915->uabi_engines) is later
 * reinterpreted as a sorted list and finally as an rb-tree by
 * intel_engines_driver_register(), hence the casts. The node must not be
 * used as an rb_node until that conversion has happened.
 */
void intel_engine_add_user(struct intel_engine_cs *engine)
{
	llist_add((struct llist_node *)&engine->uabi_node,
		  (struct llist_head *)&engine->i915->uabi_engines);
}

/* Map hardware engine class (RENDER_CLASS, ...) to uABI engine class. */
static const u8 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
};

/*
 * list_sort() comparator ordering engines by (uabi class, hw instance),
 * ascending. The list_head pointers are really the engines' uabi_node
 * storage (see sort_engines()), hence the cast through rb_node for
 * container_of().
 *
 * Returns <0, 0 or >0 in the usual qsort convention.
 */
static int engine_cmp(void *priv, const struct list_head *A,
		      const struct list_head *B)
{
	const struct intel_engine_cs *a =
		container_of((struct rb_node *)A, typeof(*a), uabi_node);
	const struct intel_engine_cs *b =
		container_of((struct rb_node *)B, typeof(*b), uabi_node);

	if (uabi_classes[a->class] < uabi_classes[b->class])
		return -1;
	if (uabi_classes[a->class] > uabi_classes[b->class])
		return 1;

	if (a->instance < b->instance)
		return -1;
	if (a->instance > b->instance)
		return 1;

	return 0;
}

/*
 * Atomically take ownership of the whole llist of engines queued by
 * intel_engine_add_user(), leaving the head empty for reuse as the
 * rb-tree root.
 */
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
	return llist_del_all((struct llist_head *)&i915->uabi_engines);
}
/*
 * Drain the llist of registered engines into @engines (converting each
 * node in place from llist_node to list_head usage) and sort it by
 * (uabi class, instance) so the subsequent rb-tree build produces a
 * correctly ordered tree.
 */
static void sort_engines(struct drm_i915_private *i915,
			 struct list_head *engines)
{
	struct llist_node *pos, *next;

	llist_for_each_safe(pos, next, get_engines(i915)) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)pos, typeof(*engine),
				     uabi_node);
		list_add((struct list_head *)&engine->uabi_node, engines);
	}
	list_sort(NULL, engines, engine_cmp);
}

#ifdef __linux__
/*
 * Compute the device-wide scheduler capability mask exposed via
 * i915->caps.scheduler. A capability is advertised only if *every*
 * uABI engine supports it: bits set by any engine lacking the feature
 * accumulate in @disabled and are masked out at the end. If the
 * baseline ENABLED bit does not survive, the whole mask is cleared.
 */
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	/* Pairs of (engine flag bit index, scheduler cap bit index). */
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->sched_engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		if (intel_uc_uses_guc_submission(&i915->gt.uc))
			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	/* Only report capabilities common to all engines. */
	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
#else
/* without the pointless ilog2 -> BIT() */
/*
 * Non-Linux variant of the above: identical logic, but the map stores
 * the flag/cap masks directly instead of their bit indices, avoiding
 * the ilog2()/BIT() round trip.
 */
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { I915_ENGINE_##x, I915_SCHEDULER_CAP_##y }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->sched_engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		if (intel_uc_uses_guc_submission(&i915->gt.uc))
			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & map[i].engine)
				enabled |= map[i].sched;
			else
				disabled |= map[i].sched;
		}
	}

	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
#endif

/**
 * intel_engine_class_repr - short user-facing name prefix for a HW class
 * @class: hardware engine class (RENDER_CLASS, ...)
 *
 * Returns "rcs"/"bcs"/"vcs"/"vecs", or "xxx" for an unknown class.
 */
const char *intel_engine_class_repr(u8 class)
{
	static const char * const uabi_names[] = {
		[RENDER_CLASS] = "rcs",
		[COPY_ENGINE_CLASS] = "bcs",
		[VIDEO_DECODE_CLASS] = "vcs",
		[VIDEO_ENHANCEMENT_CLASS] = "vecs",
	};

	if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
		return "xxx";

	return uabi_names[class];
}

/*
 * Running cursor used while assigning legacy execbuf ring ids: tracks
 * the current (gt, class) group and the next instance within it.
 */
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};

/*
 * Translate the cursor's (class, instance) into the legacy flat engine
 * id (RCS0, BCS0, VCS0.., VECS0..) used by execbuf's user_map[].
 *
 * Returns INVALID_ENGINE when the class is out of range or the instance
 * exceeds the legacy per-class maximum (only debug-warned, not fatal).
 */
static int legacy_ring_idx(const struct legacy_ring *ring)
{
	static const struct {
		u8 base, max;
	} map[] = {
		[RENDER_CLASS] = { RCS0, 1 },
		[COPY_ENGINE_CLASS] = { BCS0, 1 },
		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
	};

	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
		return INVALID_ENGINE;

	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
		return INVALID_ENGINE;

	return map[ring->class].base + ring->instance;
}

/*
 * Assign @engine its legacy execbuf index, restarting the instance
 * counter whenever the iteration crosses into a new (gt, class) group.
 * Relies on the caller visiting engines in sorted order (see
 * sort_engines()); the counter only advances for valid legacy ids.
 */
static void add_legacy_ring(struct legacy_ring *ring,
			    struct intel_engine_cs *engine)
{
	if (engine->gt != ring->gt || engine->class != ring->class) {
		ring->gt = engine->gt;
		ring->class = engine->class;
		ring->instance = 0;
	}

	engine->legacy_idx = legacy_ring_idx(ring);
	if (engine->legacy_idx != INVALID_ENGINE)
		ring->instance++;
}

/**
 * intel_engines_driver_register - publish the final uABI engine topology
 * @i915: the device private
 *
 * Converts the unordered llist of engines gathered during init into the
 * sorted rb-tree consumed by intel_engine_lookup_user():
 *  - drains and sorts the engines by (uabi class, instance),
 *  - skips engines on a wedged/unrecoverable GT,
 *  - assigns each survivor its uabi_class/uabi_instance and renames it
 *    to the user-facing "<class><instance>" form,
 *  - inserts it into the rb-tree (in sorted order, so each node is
 *    linked as the rightmost child of the previous one),
 *  - assigns legacy execbuf ring ids via add_legacy_ring().
 *
 * With selftests + DEBUG_GEM enabled, it then cross-checks the tree
 * against the expected (class, instance) enumeration and the
 * default-state/context-isolation consistency, wiping the tree (hiding
 * all engines from userspace) if any mismatch is found. Finally derives
 * the device scheduler caps.
 */
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	struct legacy_ring ring = {};
	u8 uabi_instances[4] = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	DRM_LIST_HEAD(engines);

	sort_engines(i915, &engines);

	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);
		char old[sizeof(engine->name)];

		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

		GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
		engine->uabi_instance = uabi_instances[engine->uabi_class]++;

		/* Replace the internal name with the final user facing name */
		memcpy(old, engine->name, sizeof(engine->name));
		scnprintf(engine->name, sizeof(engine->name), "%s%u",
			  intel_engine_class_repr(engine->class),
			  engine->uabi_instance);
		DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);

		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		/* Every expected (class, instance) must resolve correctly. */
		for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
			for (inst = 0; inst < uabi_instances[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}

/**
 * intel_engines_has_context_isolation - per-class default-state mask
 * @i915: the device private
 *
 * Returns a bitmask with BIT(uabi_class) set for every uABI class that
 * has at least one engine carrying a default context state image.
 */
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int which;

	which = 0;
	for_each_uabi_engine(engine, i915)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}