Lines Matching defs:tq (definition/usage search over the OpenZFS SPL taskq implementation, spl-taskq.c; the number opening each matched line below is that line's number in the source file)

98 #define	TQSTAT_INC(tq, stat)	wmsum_add(&tq->tq_sums.tqs_##stat, 1)
99 #define	TQSTAT_DEC(tq, stat)	wmsum_add(&tq->tq_sums.tqs_##stat, -1)
101 #define	_TQSTAT_MOD_LIST(mod, tq, t) do { \
104 case TQENT_LIST_PENDING: mod(tq, tasks_pending); break; \
105 case TQENT_LIST_PRIORITY: mod(tq, tasks_priority); break; \
106 case TQENT_LIST_DELAY: mod(tq, tasks_delayed); break; \
109 #define	TQSTAT_INC_LIST(tq, t) _TQSTAT_MOD_LIST(TQSTAT_INC, tq, t)
110 #define	TQSTAT_DEC_LIST(tq, t) _TQSTAT_MOD_LIST(TQSTAT_DEC, tq, t)
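The macros above rely on ## token pasting: the stat argument is spliced into a wmsum counter field name at compile time, and _TQSTAT_MOD_LIST switches on which list an entry currently occupies (pending, priority, or delay). A minimal userspace sketch of the same pattern; the wmsum_t stand-in and the struct layout are illustrative assumptions, not the kernel's definitions:

    #include <stdio.h>

    typedef struct { long ws_sum; } wmsum_t;	/* stand-in for the kernel type */

    static void
    wmsum_add(wmsum_t *ws, long delta)
    {
    	ws->ws_sum += delta;
    }

    typedef struct {
    	wmsum_t tqs_tasks_pending;
    	wmsum_t tqs_tasks_priority;
    } taskq_sums_t;

    typedef struct {
    	taskq_sums_t tq_sums;
    } taskq_t;

    #define	TQSTAT_INC(tq, stat)	wmsum_add(&tq->tq_sums.tqs_##stat, 1)
    #define	TQSTAT_DEC(tq, stat)	wmsum_add(&tq->tq_sums.tqs_##stat, -1)

    int
    main(void)
    {
    	taskq_t tq_storage = {{{ 0 }}};
    	taskq_t *tq = &tq_storage;

    	TQSTAT_INC(tq, tasks_pending);	/* -> ...tqs_tasks_pending, 1 */
    	TQSTAT_INC(tq, tasks_priority);	/* -> ...tqs_tasks_priority, 1 */
    	TQSTAT_DEC(tq, tasks_pending);
    	printf("pending=%ld priority=%ld\n",
    	    tq->tq_sums.tqs_tasks_pending.ws_sum,
    	    tq->tq_sums.tqs_tasks_priority.ws_sum);	/* pending=0 priority=1 */
    	return (0);
    }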
180 taskq_t *tq;
183 tq = list_entry(tql, taskq_t, tq_taskqs);
184 if (strcmp(name, tq->tq_name) == 0)
185 return (tq->tq_instance);
191 * NOTE: Must be called with tq->tq_lock held, returns a list_t which
195 task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
200 ASSERT(tq);
203 if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
204 t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
211 TQSTAT_DEC(tq, entries_free);
220 if (tq->tq_nalloc >= tq->tq_maxalloc) {
231 * of allocated tasks is above tq->tq_maxalloc, but we still
235 spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
237 spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
238 tq->tq_lock_class);
245 spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
247 spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);
251 tq->tq_nalloc++;
258 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
262 task_free(taskq_t *tq, taskq_ent_t *t)
264 ASSERT(tq);
270 tq->tq_nalloc--;
274 * NOTE: Must be called with tq->tq_lock held, either destroys the
278 task_done(taskq_t *tq, taskq_ent_t *t)
280 ASSERT(tq);
287 if (tq->tq_nalloc <= tq->tq_minalloc) {
293 list_add_tail(&t->tqent_list, &tq->tq_free_list);
294 TQSTAT_INC(tq, entries_free);
296 task_free(tq, t);
308 taskq_t *tq = t->tqent_taskq;
312 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
316 spin_unlock_irqrestore(&tq->tq_lock, flags);
328 list_for_each_prev(l, &tq->tq_prio_list) {
335 if (l == &tq->tq_prio_list)
336 list_add(&t->tqent_list, &tq->tq_prio_list);
338 spin_unlock_irqrestore(&tq->tq_lock, flags);
340 wake_up(&tq->tq_work_waitq);
342 TQSTAT_INC(tq, tasks_delayed_requeued);
360 taskq_lowest_id(taskq_t *tq)
362 taskqid_t lowest_id = tq->tq_next_id;
366 if (!list_empty(&tq->tq_pend_list)) {
367 t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
371 if (!list_empty(&tq->tq_prio_list)) {
372 t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
376 if (!list_empty(&tq->tq_delay_list)) {
377 t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
381 if (!list_empty(&tq->tq_active_list)) {
382 tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
395 taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
400 ASSERT(tq);
403 list_for_each_prev(l, &tq->tq_active_list) {
410 if (l == &tq->tq_active_list)
411 list_add(&tqt->tqt_active_list, &tq->tq_active_list);
419 taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
444 taskq_find(taskq_t *tq, taskqid_t id)
450 t = taskq_find_list(tq, &tq->tq_delay_list, id);
454 t = taskq_find_list(tq, &tq->tq_prio_list, id);
458 t = taskq_find_list(tq, &tq->tq_pend_list, id);
462 list_for_each(l, &tq->tq_active_list) {
505 taskq_wait_id_check(taskq_t *tq, taskqid_t id)
510 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
511 rc = (taskq_find(tq, id) == NULL);
512 spin_unlock_irqrestore(&tq->tq_lock, flags);
522 taskq_wait_id(taskq_t *tq, taskqid_t id)
524 wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
529 taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
534 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
535 rc = (id < tq->tq_lowest_id);
536 spin_unlock_irqrestore(&tq->tq_lock, flags);
549 taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
551 id = id ? id : tq->tq_next_id - 1;
552 wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
557 taskq_wait_check(taskq_t *tq)
562 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
563 rc = (tq->tq_lowest_id == tq->tq_next_id);
564 spin_unlock_irqrestore(&tq->tq_lock, flags);
575 taskq_wait(taskq_t *tq)
577 wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
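The three wait primitives matched above differ only in their wait_event() predicate: taskq_wait_id() polls taskq_find() for a single id (line 511), taskq_wait_outstanding() waits until tq_lowest_id passes a given id, where 0 selects tq_next_id - 1, i.e. everything dispatched so far (line 551), and taskq_wait() waits for tq_lowest_id to catch up to tq_next_id (line 563). A hedged usage sketch; my_tq, my_func, and arg are hypothetical:

    static void
    example_waits(taskq_t *my_tq, task_func_t my_func, void *arg)
    {
    	taskqid_t id = taskq_dispatch(my_tq, my_func, arg, TQ_SLEEP);

    	taskq_wait_id(my_tq, id);		/* just this one task */
    	taskq_wait_outstanding(my_tq, 0);	/* all dispatched so far */
    	taskq_wait(my_tq);			/* queue completely idle */
    }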
582 taskq_member(taskq_t *tq, kthread_t *t)
584 return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
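taskq_member() answers "is this thread one of tq's workers?" via the thread-specific value stored by the worker at line 992. A hedged sketch of the usual re-entrancy guard (names hypothetical):

    static void
    example_member(taskq_t *my_tq, task_func_t my_func, void *arg)
    {
    	if (taskq_member(my_tq, curthread)) {
    		/* Already on a my_tq worker: blocking in taskq_wait()
    		 * here could deadlock, so queue asynchronously only. */
    		(void) taskq_dispatch(my_tq, my_func, arg, TQ_NOSLEEP);
    	}
    }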
602 taskq_cancel_id(taskq_t *tq, taskqid_t id)
608 ASSERT(tq);
610 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
611 t = taskq_find(tq, id);
614 TQSTAT_DEC_LIST(tq, t);
615 TQSTAT_DEC(tq, tasks_total);
618 TQSTAT_INC(tq, tasks_cancelled);
624 if (tq->tq_lowest_id == t->tqent_id) {
625 tq->tq_lowest_id = taskq_lowest_id(tq);
626 ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
630 * The task_expire() function takes the tq->tq_lock so drop
634 spin_unlock_irqrestore(&tq->tq_lock, flags);
636 spin_lock_irqsave_nested(&tq->tq_lock, flags,
637 tq->tq_lock_class);
641 task_done(tq, t);
645 spin_unlock_irqrestore(&tq->tq_lock, flags);
648 taskq_wait_id(tq, id);
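Reading the cancel path: an entry still sitting on a list is unlinked and counted as cancelled (lines 614-618), while one that is already executing cannot be cancelled, so the function waits for it via taskq_wait_id() (line 648). A hedged caller-side sketch; the 0/ENOENT/EBUSY meanings below are my reading of this path:

    static void
    example_cancel(taskq_t *my_tq, taskqid_t id)
    {
    	int rc = taskq_cancel_id(my_tq, id);

    	if (rc == 0) {
    		/* unlinked before it ran; the func will never be called */
    	} else if (rc == ENOENT) {
    		/* unknown id: already finished (or never dispatched) */
    	} else if (rc == EBUSY) {
    		/* was mid-execution: cancel waited for it to complete */
    	}
    }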
656 static int taskq_thread_spawn(taskq_t *tq);
659 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
665 ASSERT(tq);
668 spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
671 if (!(tq->tq_flags & TASKQ_ACTIVE))
675 ASSERT(tq->tq_nactive <= tq->tq_nthreads);
676 if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
678 if (taskq_thread_spawn(tq) == 0)
682 if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
690 list_add(&t->tqent_list, &tq->tq_prio_list);
694 list_add_tail(&t->tqent_list, &tq->tq_prio_list);
697 list_add_tail(&t->tqent_list, &tq->tq_pend_list);
699 TQSTAT_INC_LIST(tq, t);
700 TQSTAT_INC(tq, tasks_total);
702 t->tqent_id = rc = tq->tq_next_id;
703 tq->tq_next_id++;
706 t->tqent_taskq = tq;
717 wake_up(&tq->tq_work_waitq);
719 TQSTAT_INC(tq, tasks_dispatched);
722 if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
723 (void) taskq_thread_spawn(tq);
725 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
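Lines 690-697 encode the queue-selection policy: TQ_NOQUEUE entries go to the head of the priority list (run immediately or fail), TQ_FRONT entries to its tail, and everything else to the tail of the pending list. On failure taskq_dispatch() returns TASKQID_INVALID, so callers should check for it. A hedged sketch (names hypothetical):

    static void
    example_dispatch(taskq_t *my_tq, task_func_t my_func, void *arg)
    {
    	taskqid_t id = taskq_dispatch(my_tq, my_func, arg, TQ_SLEEP);

    	if (id == TASKQID_INVALID) {
    		/* taskq inactive, TQ_NOQUEUE with no idle thread, or
    		 * entry allocation failed: handle the miss (e.g. inline) */
    	}
    }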
731 taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
738 ASSERT(tq);
741 spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
744 if (!(tq->tq_flags & TASKQ_ACTIVE))
747 if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
753 list_add_tail(&t->tqent_list, &tq->tq_delay_list);
755 TQSTAT_INC_LIST(tq, t);
756 TQSTAT_INC(tq, tasks_total);
758 t->tqent_id = rc = tq->tq_next_id;
759 tq->tq_next_id++;
762 t->tqent_taskq = tq;
771 TQSTAT_INC(tq, tasks_dispatched_delayed);
774 if (tq->tq_nactive == tq->tq_nthreads)
775 (void) taskq_thread_spawn(tq);
777 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
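Delayed dispatch parks the entry on tq_delay_list (line 753); when its timer fires, task_expire() moves it to the priority list and wakes a worker (the requeue path around lines 328-342). The final argument is an absolute lbolt time, not a relative delay. A hedged sketch assuming the standard ddi_get_lbolt() and SEC_TO_TICK() helpers:

    static taskqid_t
    example_timeout(taskq_t *my_tq, task_func_t my_func, void *arg)
    {
    	/* NB: last argument is an absolute expiry time, not a delta */
    	return (taskq_dispatch_delay(my_tq, my_func, arg, TQ_SLEEP,
    	    ddi_get_lbolt() + SEC_TO_TICK(5)));	/* ~5 s from now */
    }

Pairing this with taskq_cancel_id() before the expiry gives the common timer-style usage: if the cancel returns 0, the function never ran.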
783 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
787 ASSERT(tq);
790 spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
791 tq->tq_lock_class);
794 if (!(tq->tq_flags & TASKQ_ACTIVE)) {
799 if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
801 if (taskq_thread_spawn(tq) == 0)
823 list_add_tail(&t->tqent_list, &tq->tq_prio_list);
826 list_add_tail(&t->tqent_list, &tq->tq_pend_list);
828 TQSTAT_INC_LIST(tq, t);
829 TQSTAT_INC(tq, tasks_total);
831 t->tqent_id = tq->tq_next_id;
832 tq->tq_next_id++;
835 t->tqent_taskq = tq;
842 wake_up(&tq->tq_work_waitq);
844 TQSTAT_INC(tq, tasks_dispatched);
847 if (tq->tq_nactive == tq->tq_nthreads)
848 (void) taskq_thread_spawn(tq);
850 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
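taskq_dispatch_ent() is the allocation-free variant: the caller provides the taskq_ent_t, so task_alloc() is never called and the function returns void; an inactive queue is signaled by storing TASKQID_INVALID in the entry instead (the branch at line 794). A hedged sketch with a stack-owned entry:

    static void
    example_dispatch_ent(taskq_t *my_tq, task_func_t my_func, void *arg)
    {
    	taskq_ent_t ent;

    	taskq_init_ent(&ent);
    	taskq_dispatch_ent(my_tq, my_func, arg, 0, &ent);
    	/* The entry is caller-owned and must outlive the task; with a
    	 * stack entry like this, wait before returning. */
    	taskq_wait_id(my_tq, ent.tqent_id);
    }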
881 taskq_next_ent(taskq_t *tq)
885 if (!list_empty(&tq->tq_prio_list))
886 list = &tq->tq_prio_list;
887 else if (!list_empty(&tq->tq_pend_list))
888 list = &tq->tq_pend_list;
901 taskq_t *tq = (taskq_t *)arg;
904 if (taskq_thread_create(tq) == NULL) {
906 spin_lock_irqsave_nested(&tq->tq_lock, flags,
907 tq->tq_lock_class);
908 tq->tq_nspawn--;
909 spin_unlock_irqrestore(&tq->tq_lock, flags);
921 taskq_thread_spawn(taskq_t *tq)
925 if (!(tq->tq_flags & TASKQ_DYNAMIC))
928 tq->lastspawnstop = jiffies;
929 if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
930 (tq->tq_flags & TASKQ_ACTIVE)) {
931 spawning = (++tq->tq_nspawn);
933 tq, TQ_NOSLEEP);
949 taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
951 ASSERT(!taskq_next_ent(tq));
952 if (!(tq->tq_flags & TASKQ_DYNAMIC) || !spl_taskq_thread_dynamic)
954 if (!(tq->tq_flags & TASKQ_ACTIVE))
956 if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
959 ASSERT3U(tq->tq_nthreads, >, 1);
960 if (tq->tq_nspawn != 0)
962 if (time_before(jiffies, tq->lastspawnstop +
965 tq->lastspawnstop = jiffies;
975 taskq_t *tq;
983 tq = tqt->tqt_tq;
992 tsd_set(taskq_tsd, tq);
993 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1000 if (tq->tq_flags & TASKQ_DYNAMIC)
1001 tq->tq_nspawn--;
1004 if (tq->tq_nthreads >= tq->tq_maxthreads)
1007 tq->tq_nthreads++;
1008 list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
1009 wake_up(&tq->tq_wait_waitq);
1012 TQSTAT_INC(tq, threads_total);
1016 if (list_empty(&tq->tq_pend_list) &&
1017 list_empty(&tq->tq_prio_list)) {
1019 if (taskq_thread_should_stop(tq, tqt))
1022 add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
1023 spin_unlock_irqrestore(&tq->tq_lock, flags);
1025 TQSTAT_INC(tq, thread_sleeps);
1026 TQSTAT_INC(tq, threads_idle);
1031 TQSTAT_DEC(tq, threads_idle);
1032 TQSTAT_INC(tq, thread_wakeups);
1034 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1035 tq->tq_lock_class);
1036 remove_wait_queue(&tq->tq_work_waitq, &wait);
1041 if ((t = taskq_next_ent(tq)) != NULL) {
1043 TQSTAT_DEC_LIST(tq, t);
1044 TQSTAT_DEC(tq, tasks_total);
1065 taskq_insert_in_order(tq, tqt);
1066 tq->tq_nactive++;
1067 spin_unlock_irqrestore(&tq->tq_lock, flags);
1069 TQSTAT_INC(tq, threads_active);
1077 TQSTAT_DEC(tq, threads_active);
1080 TQSTAT_INC(tq, tasks_executed_normal);
1082 TQSTAT_INC(tq, tasks_executed_priority);
1083 TQSTAT_INC(tq, tasks_executed);
1085 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1086 tq->tq_lock_class);
1088 tq->tq_nactive--;
1094 task_done(tq, t);
1100 if (tq->tq_lowest_id == tqt->tqt_id) {
1101 tq->tq_lowest_id = taskq_lowest_id(tq);
1102 ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
1107 taskq_thread_spawn(tq))
1112 wake_up_all(&tq->tq_wait_waitq);
1114 TQSTAT_INC(tq, thread_wakeups_nowork);
1121 tq->tq_nthreads--;
1124 TQSTAT_DEC(tq, threads_total);
1125 TQSTAT_INC(tq, threads_destroyed);
1129 spin_unlock_irqrestore(&tq->tq_lock, flags);
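The idle half of the worker loop (lines 1016-1036) is the classic Linux exclusive wait-queue pattern: enqueue, drop the lock, schedule(), then reacquire and dequeue. Because the wait is exclusive, each wake_up() at line 717 or 842 rouses a single idle worker rather than the whole pool, and because the waitqueue add happens under the same tq_lock that dispatchers hold while inserting work and waking, no wakeup can be missed. Condensed for reference; this shows the shape, not the exact loop:

    /* inside the worker's for (;;) loop, tq_lock held */
    DECLARE_WAITQUEUE(wait, current);

    set_current_state(TASK_INTERRUPTIBLE);
    if (list_empty(&tq->tq_pend_list) && list_empty(&tq->tq_prio_list)) {
    	add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
    	spin_unlock_irqrestore(&tq->tq_lock, flags);
    	schedule();			/* sleep until a wake_up() */
    	spin_lock_irqsave(&tq->tq_lock, flags);	/* _nested in the real code */
    	remove_wait_queue(&tq->tq_work_waitq, &wait);
    } else {
    	__set_current_state(TASK_RUNNING);	/* work available, no sleep */
    }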
1138 taskq_thread_create(taskq_t *tq)
1146 tqt->tqt_tq = tq;
1150 "%s", tq->tq_name);
1162 set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));
1166 TQSTAT_INC(tq, threads_created);
1172 taskq_stats_init(taskq_t *tq)
1174 taskq_sums_t *tqs = &tq->tq_sums;
1198 taskq_stats_fini(taskq_t *tq)
1200 taskq_sums_t *tqs = &tq->tq_sums;
1229 taskq_t *tq = ksp->ks_private;
1232 tqks->tqks_threads_max.value.ui64 = tq->tq_maxthreads;
1233 tqks->tqks_entry_pool_min.value.ui64 = tq->tq_minalloc;
1234 tqks->tqks_entry_pool_max.value.ui64 = tq->tq_maxalloc;
1236 taskq_sums_t *tqs = &tq->tq_sums;
1283 taskq_kstats_init(taskq_t *tq)
1286 snprintf(name, sizeof (name), "%s.%d", tq->tq_name, tq->tq_instance);
1295 ksp->ks_private = tq;
1301 tq->tq_ksp = ksp;
1305 taskq_kstats_fini(taskq_t *tq)
1307 if (tq->tq_ksp == NULL)
1310 kmem_free(tq->tq_ksp->ks_data, sizeof (taskq_kstats_t));
1311 kstat_delete(tq->tq_ksp);
1313 tq->tq_ksp = NULL;
1320 taskq_t *tq;
1339 tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
1340 if (tq == NULL)
1343 tq->tq_hp_support = B_FALSE;
1346 tq->tq_hp_support = B_TRUE;
1348 &tq->tq_hp_cb_node) != 0) {
1349 kmem_free(tq, sizeof (*tq));
1354 spin_lock_init(&tq->tq_lock);
1355 INIT_LIST_HEAD(&tq->tq_thread_list);
1356 INIT_LIST_HEAD(&tq->tq_active_list);
1357 tq->tq_name = kmem_strdup(name);
1358 tq->tq_nactive = 0;
1359 tq->tq_nthreads = 0;
1360 tq->tq_nspawn = 0;
1361 tq->tq_maxthreads = nthreads;
1362 tq->tq_cpu_pct = threads_arg;
1363 tq->tq_pri = pri;
1364 tq->tq_minalloc = minalloc;
1365 tq->tq_maxalloc = maxalloc;
1366 tq->tq_nalloc = 0;
1367 tq->tq_flags = (flags | TASKQ_ACTIVE);
1368 tq->tq_next_id = TASKQID_INITIAL;
1369 tq->tq_lowest_id = TASKQID_INITIAL;
1370 tq->lastspawnstop = jiffies;
1371 INIT_LIST_HEAD(&tq->tq_free_list);
1372 INIT_LIST_HEAD(&tq->tq_pend_list);
1373 INIT_LIST_HEAD(&tq->tq_prio_list);
1374 INIT_LIST_HEAD(&tq->tq_delay_list);
1375 init_waitqueue_head(&tq->tq_work_waitq);
1376 init_waitqueue_head(&tq->tq_wait_waitq);
1377 tq->tq_lock_class = TQ_LOCK_GENERAL;
1378 INIT_LIST_HEAD(&tq->tq_taskqs);
1379 taskq_stats_init(tq);
1382 spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
1383 tq->tq_lock_class);
1386 task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
1389 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
1396 tqt = taskq_thread_create(tq);
1404 wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
1409 tq->tq_nspawn = 0;
1412 taskq_destroy(tq);
1417 tq->tq_instance = taskq_find_by_name(name) + 1;
1418 list_add_tail(&tq->tq_taskqs, &tq_list);
1422 taskq_kstats_init(tq);
1424 return (tq);
1429 taskq_destroy(taskq_t *tq)
1436 ASSERT(tq);
1437 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1438 tq->tq_flags &= ~TASKQ_ACTIVE;
1439 spin_unlock_irqrestore(&tq->tq_lock, flags);
1441 if (tq->tq_hp_support) {
1443 spl_taskq_cpuhp_state, &tq->tq_hp_cb_node));
1453 taskq_wait(tq);
1455 taskq_kstats_fini(tq);
1459 list_del(&tq->tq_taskqs);
1462 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1464 while (tq->tq_nspawn) {
1465 spin_unlock_irqrestore(&tq->tq_lock, flags);
1467 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1468 tq->tq_lock_class);
1477 while (!list_empty(&tq->tq_thread_list)) {
1478 tqt = list_entry(tq->tq_thread_list.next,
1481 spin_unlock_irqrestore(&tq->tq_lock, flags);
1485 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1486 tq->tq_lock_class);
1489 while (!list_empty(&tq->tq_free_list)) {
1490 t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
1495 task_free(tq, t);
1498 ASSERT0(tq->tq_nthreads);
1499 ASSERT0(tq->tq_nalloc);
1500 ASSERT0(tq->tq_nspawn);
1501 ASSERT(list_empty(&tq->tq_thread_list));
1502 ASSERT(list_empty(&tq->tq_active_list));
1503 ASSERT(list_empty(&tq->tq_free_list));
1504 ASSERT(list_empty(&tq->tq_pend_list));
1505 ASSERT(list_empty(&tq->tq_prio_list));
1506 ASSERT(list_empty(&tq->tq_delay_list));
1508 spin_unlock_irqrestore(&tq->tq_lock, flags);
1510 taskq_stats_fini(tq);
1511 kmem_strfree(tq->tq_name);
1512 kmem_free(tq, sizeof (taskq_t));
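Putting the two halves together: taskq_create() prepopulates minalloc entries (line 1386), spawns its threads, and links the queue onto tq_list, while taskq_destroy() clears TASKQ_ACTIVE, drains with taskq_wait(), reaps the threads and the free list, and asserts every list is empty (lines 1498-1506). A hedged end-to-end sketch (my_func/arg hypothetical):

    static void
    example_lifecycle(task_func_t my_func, void *arg)
    {
    	taskq_t *tq = taskq_create("example_tq", 4, minclsyspri,
    	    4, INT_MAX, TASKQ_PREPOPULATE);	/* 4 threads, 4 cached ents */

    	if (tq == NULL)
    		return;
    	VERIFY(taskq_dispatch(tq, my_func, arg, TQ_SLEEP) !=
    	    TASKQID_INVALID);
    	taskq_wait(tq);		/* drain: destroy asserts all lists empty */
    	taskq_destroy(tq);
    }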
1526 taskq_t *tq;
1535 tq = taskq_create(name, nthreads, minclsyspri, nthreads, INT_MAX,
1537 VERIFY(tq != NULL);
1538 VERIFY(tq->tq_nthreads == nthreads);
1540 list_for_each_entry(tqt, &tq->tq_thread_list, tqt_thread_list) {
1548 return (tq);
1572 taskq_t *tq;
1581 tq = list_entry(tql, taskq_t, tq_taskqs);
1583 mutex_enter(tq->tq_ksp->ks_lock);
1584 taskq_kstats_update(tq->tq_ksp, KSTAT_READ);
1585 taskq_kstats_t *tqks = tq->tq_ksp->ks_data;
1587 snprintf(name, sizeof (name), "%s.%d", tq->tq_name,
1588 tq->tq_instance);
1600 mutex_exit(tq->tq_ksp->ks_lock);
1662 taskq_t *tq = NULL;
1673 list_for_each_entry(tq, &tq_list, tq_taskqs) {
1674 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1675 tq->tq_lock_class);
1677 t = taskq_next_ent(tq);
1679 (void) taskq_thread_spawn(tq);
1681 tq->tq_name, tq->tq_instance);
1683 spin_unlock_irqrestore(&tq->tq_lock, flags);
1711 taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
1715 ASSERT(tq);
1716 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1718 if (!(tq->tq_flags & TASKQ_ACTIVE)) {
1719 spin_unlock_irqrestore(&tq->tq_lock, flags);
1723 ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
1724 int nthreads = MIN(tq->tq_cpu_pct, 100);
1726 tq->tq_maxthreads = nthreads;
1728 if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
1729 tq->tq_maxthreads > tq->tq_nthreads) {
1730 spin_unlock_irqrestore(&tq->tq_lock, flags);
1731 taskq_thread_t *tqt = taskq_thread_create(tq);
1736 spin_unlock_irqrestore(&tq->tq_lock, flags);
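Both hotplug callbacks recompute the thread cap from tq_cpu_pct; only the lines matching tq are shown here, so the scaling step between lines 1724 and 1726 (and 1758/1760) is elided. A worked example, on the assumption that the elided line applies the usual (num_online_cpus() + 1) scaling:

    /*
     * With tq_cpu_pct = 75 on an 8-CPU system:
     *	nthreads = MIN(75, 100)			-> 75
     *	nthreads = MAX(((8 + 1) * 75) / 100, 1)	-> MAX(6, 1) = 6
     * so tq_maxthreads becomes 6, and the code below either spawns or
     * retires one worker to converge on the new cap.
     */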
1748 taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
1751 ASSERT(tq);
1752 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1754 if (!(tq->tq_flags & TASKQ_ACTIVE))
1757 ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
1758 int nthreads = MIN(tq->tq_cpu_pct, 100);
1760 tq->tq_maxthreads = nthreads;
1762 if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
1763 tq->tq_maxthreads < tq->tq_nthreads) {
1764 ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
1765 taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
1768 spin_unlock_irqrestore(&tq->tq_lock, flags);
1776 spin_unlock_irqrestore(&tq->tq_lock, flags);