Lines Matching defs:kd

150 struct kcov_dev *kd;
153 kd = kd_curproc(KCOV_MODE_TRACE_PC);
154 if (kd == NULL)
157 if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_PC, 1)))
158 kd->kd_buf[idx] = (uintptr_t)__builtin_return_address(0);
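The matches above (lines 150-158) are the core of the PC-tracing hook, which the compiler instruments into every basic block: fetch the per-process device, claim one buffer slot, store the caller's return address. A minimal single-threaded userland model of the same record format, assuming the kcov(4) layout where slot 0 of the buffer holds the record count (struct and function names here are illustrative stand-ins):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for the kernel's struct kcov_dev. */
struct kcov_dev_model {
	uintptr_t	*kd_buf;	/* slot 0 holds the record count */
	size_t		 kd_nmemb;	/* capacity, excluding slot 0 */
};

/* Model of the PC hook: append the caller's program counter. */
void
trace_pc_model(struct kcov_dev_model *kd)
{
	uintptr_t idx = kd->kd_buf[0];

	if (idx + 1 > kd->kd_nmemb)
		return;			/* buffer full: drop the record */
	kd->kd_buf[idx + 1] = (uintptr_t)__builtin_return_address(0);
	kd->kd_buf[0] = idx + 1;	/* kd_claim() does this with a CAS */
}
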
170 trace_cmp(struct kcov_dev *kd, uint64_t type, uint64_t arg1, uint64_t arg2,
175 if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_CMP, 1))) {
176 kd->kd_buf[idx] = type;
177 kd->kd_buf[idx + 1] = arg1;
178 kd->kd_buf[idx + 2] = arg2;
179 kd->kd_buf[idx + 3] = pc;
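Lines 170-179 show the shared helper behind the comparison hooks: each event occupies four consecutive slots, which is presumably why the claim uses KCOV_STRIDE_TRACE_CMP rather than 1. The record layout this implies (field names from the fragment; the framing as a struct is an assumption):

#include <stdint.h>

/*
 * One KCOV_MODE_TRACE_CMP record, as implied by lines 176-179.
 * Records are packed back to back after the count in slot 0.
 */
struct trace_cmp_record_model {
	uint64_t type;	/* flags describing the comparison (width etc.) */
	uint64_t arg1;	/* first operand */
	uint64_t arg2;	/* second operand */
	uint64_t pc;	/* address of the comparison instruction */
};
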
184 struct kcov_dev *kd; \
185 if ((kd = kd_curproc(KCOV_MODE_TRACE_CMP)) == NULL) \
187 trace_cmp(kd, (type), (arg1), (arg2), \
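The macro at lines 184-187 wraps the per-width sanitizer entry points so each hook stays a one-liner. A plausible reconstruction of the macro and one of its users, assuming a do/while framing and a Linux-style KCOV_CMP_SIZE() encoding (both assumptions; only the three matched lines are from the source):

/* Hypothetical reconstruction of the TRACE_CMP macro. */
#define TRACE_CMP(type, arg1, arg2) do {				\
	struct kcov_dev *kd;						\
	if ((kd = kd_curproc(KCOV_MODE_TRACE_CMP)) == NULL)		\
		return;							\
	trace_cmp(kd, (type), (arg1), (arg2),				\
	    (uintptr_t)__builtin_return_address(0));			\
} while (0)

/* Example compiler-emitted hook for 4-byte comparisons. */
void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(2), arg1, arg2);
}
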
242 struct kcov_dev *kd;
246 kd = kd_curproc(KCOV_MODE_TRACE_CMP);
247 if (kd == NULL)
273 trace_cmp(kd, type, cases[i + 2], val, pc);
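Line 273 sits inside the switch-statement hook, which emits one comparison record per case label. A plausible reconstruction of the surrounding loop, assuming the usual sanitizer ABI where cases[0] is the number of case values, cases[1] their width in bits, and cases[2..] the values themselves (the type encoding is not visible in the listing):

/* Reconstruction of the switch hook, under the ABI assumption above. */
void
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	struct kcov_dev *kd;
	uint64_t i, type;
	uintptr_t pc;

	kd = kd_curproc(KCOV_MODE_TRACE_CMP);
	if (kd == NULL)
		return;

	pc = (uintptr_t)__builtin_return_address(0);
	type = 0;	/* derived from cases[1]; encoding not shown */
	for (i = 0; i < cases[0]; i++)
		trace_cmp(kd, type, cases[i + 2], val, pc);
}
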
301 struct kcov_dev *kd;
303 kd = malloc(sizeof(*kd), M_SUBPROC, M_WAITOK | M_ZERO);
304 kd->kd_unit = minor(dev);
306 KASSERT(kd_lookup(kd->kd_unit) == NULL);
307 TAILQ_INSERT_TAIL(&kd_list, kd, kd_entry);
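Lines 301-307 are from the open(2) entry point: one struct kcov_dev is allocated per device minor, asserted unique, and linked onto a global list. A self-contained userland model of the same bookkeeping, using <sys/queue.h> as the fragment does (names with a _model suffix are stand-ins):

#include <sys/queue.h>
#include <stdlib.h>

struct kcov_open_dev {
	int	kd_unit;
	TAILQ_ENTRY(kcov_open_dev) kd_entry;
};
TAILQ_HEAD(, kcov_open_dev) kd_list_model =
    TAILQ_HEAD_INITIALIZER(kd_list_model);

/* Model of kcovopen(): one zeroed device per minor, kept on a list. */
struct kcov_open_dev *
kcov_open_model(int unit)
{
	struct kcov_open_dev *kd;

	/* The kernel uses malloc(9) with M_WAITOK|M_ZERO, so no NULL. */
	if ((kd = calloc(1, sizeof(*kd))) == NULL)
		return (NULL);
	kd->kd_unit = unit;
	TAILQ_INSERT_TAIL(&kd_list_model, kd, kd_entry);
	return (kd);
}
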
317 struct kcov_dev *kd;
321 kd = kd_lookup(minor(dev));
322 if (kd == NULL) {
327 TAILQ_REMOVE(&kd_list, kd, kd_entry);
328 if (kd->kd_state == KCOV_STATE_TRACE && kd->kd_kr == NULL) {
333 kd->kd_state = KCOV_STATE_DYING;
334 kd->kd_mode = KCOV_MODE_NONE;
336 kd_free(kd);
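The close path (lines 317-336) removes the device from the global list and then forks: if tracing is still enabled locally (KCOV_STATE_TRACE with no remote, kd_kr == NULL), the traced process may still be writing the buffer, so the device is parked in KCOV_STATE_DYING and freed later by the exit hook; otherwise it can be freed immediately. In rough outline (control flow inferred from the matched lines; the errno is an assumption):

/* Outline of kcovclose(), reconstructed around the matched lines. */
kd = kd_lookup(minor(dev));
if (kd == NULL)
	return (ENXIO);

TAILQ_REMOVE(&kd_list, kd, kd_entry);
if (kd->kd_state == KCOV_STATE_TRACE && kd->kd_kr == NULL) {
	/* Still bound to a process: let kcov_exit() free it. */
	kd->kd_state = KCOV_STATE_DYING;
	kd->kd_mode = KCOV_MODE_NONE;
} else {
	kd_free(kd);
}
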
346 struct kcov_dev *kd;
352 kd = kd_lookup(minor(dev));
353 if (kd == NULL) {
360 error = kd_init(kd, *((unsigned long *)data));
368 if (kd->kd_state != KCOV_STATE_READY) {
377 kd->kd_state = KCOV_STATE_TRACE;
378 kd->kd_mode = mode;
380 if (kd->kd_kr == NULL)
381 p->p_kd = kd;
385 if ((p->p_kd != kd && kd->kd_kr == NULL)) {
389 if (kd->kd_state != KCOV_STATE_TRACE) {
393 kd->kd_state = KCOV_STATE_READY;
394 kd->kd_mode = KCOV_MODE_NONE;
395 if (kd->kd_kr != NULL)
396 kr_barrier(kd->kd_kr);
400 error = kcov_remote_attach(kd,
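Lines 346-400 span the ioctl(2) handler and show the device's state machine: KIOSETBUFSIZE calls kd_init() with the user-supplied element count, KIOENABLE moves READY to TRACE and (for local tracing only) hangs the device off the calling process via p->p_kd, KIODISABLE reverses that, and KIOREMOTEATTACH hands off to kcov_remote_attach(). A plausible reconstruction of the two transition cases (errno choices where not shown are assumptions):

/* Outline of the KIOENABLE/KIODISABLE cases. */
case KIOENABLE:
	if (kd->kd_state != KCOV_STATE_READY) {
		error = EBUSY;
		break;
	}
	kd->kd_state = KCOV_STATE_TRACE;
	kd->kd_mode = mode;
	if (kd->kd_kr == NULL)		/* local tracing: bind to process */
		p->p_kd = kd;
	break;
case KIODISABLE:
	/* Only the enabled process may disable itself. */
	if ((p->p_kd != kd && kd->kd_kr == NULL)) {
		error = EPERM;
		break;
	}
	if (kd->kd_state != KCOV_STATE_TRACE) {
		error = EBUSY;
		break;
	}
	kd->kd_state = KCOV_STATE_READY;
	kd->kd_mode = KCOV_MODE_NONE;
	if (kd->kd_kr != NULL)
		kr_barrier(kd->kd_kr);	/* wait out remote users */
	p->p_kd = NULL;
	break;
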
414 struct kcov_dev *kd;
420 kd = kd_lookup(minor(dev));
421 if (kd == NULL)
424 if (offset < 0 || offset >= kd->kd_nmemb * KCOV_BUF_MEMB_SIZE)
427 va = (vaddr_t)kd->kd_buf + offset;
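Lines 414-427 are the mmap handler: the offset is bounds-checked against kd_nmemb * KCOV_BUF_MEMB_SIZE before being translated to an address inside kd_buf, which is how the coverage buffer becomes visible to userland. A hypothetical userland counterpart that sets up the mapping this handler serves (assuming KCOV_BUF_MEMB_SIZE == sizeof(uintptr_t) and the ioctl names from kcov(4)):

#include <sys/ioctl.h>
#include <sys/kcov.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdint.h>

#define KCOV_NMEMB	1024UL		/* arbitrary size for illustration */

/* Open /dev/kcov, size its buffer, and map it; NULL on error. */
uintptr_t *
map_kcov_buffer(int *fdp)
{
	unsigned long nmemb = KCOV_NMEMB;
	void *buf;
	int fd;

	if ((fd = open("/dev/kcov", O_RDWR)) == -1)
		return (NULL);
	if (ioctl(fd, KIOSETBUFSIZE, &nmemb) == -1)
		return (NULL);
	buf = mmap(NULL, KCOV_NMEMB * sizeof(uintptr_t),
	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED)
		return (NULL);
	*fdp = fd;
	return (buf);			/* buf[0] counts records */
}
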
439 struct kcov_dev *kd;
443 kd = p->p_kd;
444 if (kd == NULL) {
449 if (kd->kd_state == KCOV_STATE_DYING) {
451 kd_free(kd);
453 kd->kd_state = KCOV_STATE_READY;
454 kd->kd_mode = KCOV_MODE_NONE;
455 if (kd->kd_kr != NULL)
456 kr_barrier(kd->kd_kr);
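Lines 439-456 are the process-exit hook: if the dying process holds a descriptor, it is either freed outright (completing the deferred teardown that kcovclose() started by setting KCOV_STATE_DYING) or returned to READY for reuse. Outline, reconstructed around the matched lines:

/* Outline of kcov_exit(). */
kd = p->p_kd;
if (kd == NULL)
	return;

if (kd->kd_state == KCOV_STATE_DYING) {
	p->p_kd = NULL;
	kd_free(kd);		/* close(2) already ran; finish teardown */
} else {
	kd->kd_state = KCOV_STATE_READY;
	kd->kd_mode = KCOV_MODE_NONE;
	if (kd->kd_kr != NULL)
		kr_barrier(kd->kd_kr);	/* wait out remote users */
	p->p_kd = NULL;
}
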
476 struct kcov_dev *kd;
480 TAILQ_FOREACH(kd, &kd_list, kd_entry) {
481 if (kd->kd_unit == unit)
482 return (kd);
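kd_lookup() (lines 476-482) is a plain linear scan of the global device list by minor number; given the matched lines, its full body is almost certainly just:

static struct kcov_dev *
kd_lookup(int unit)
{
	struct kcov_dev *kd;

	TAILQ_FOREACH(kd, &kd_list, kd_entry) {
		if (kd->kd_unit == unit)
			return (kd);
	}
	return (NULL);
}
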
509 kd_init(struct kcov_dev *kd, unsigned long nmemb)
515 KASSERT(kd->kd_buf == NULL);
517 if (kd->kd_state != KCOV_STATE_NONE)
531 if (kd->kd_state != KCOV_STATE_NONE) {
536 kd->kd_buf = buf;
538 kd->kd_nmemb = nmemb - 1;
539 kd->kd_size = size;
540 kd->kd_state = KCOV_STATE_READY;
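kd_init() (lines 509-540) validates that no buffer is set yet, allocates a zeroed buffer, and publishes it; the second kd_state check at line 531 suggests the allocation happens with the lock dropped, so a racing close has to be re-detected afterwards. Note line 538: kd_nmemb is nmemb - 1 because slot 0 is reserved for the record counter. A self-contained model of the sizing and setup (PAGE_SIZE_MODEL and the state constants are stand-ins):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE_MODEL	4096UL		/* stand-in for the kernel page size */

enum { STATE_NONE, STATE_READY };	/* stand-ins for KCOV_STATE_* */

struct kd_init_dev {
	uintptr_t	*kd_buf;
	unsigned long	 kd_nmemb;
	size_t		 kd_size;
	int		 kd_state;
};

/* Model of kd_init()'s buffer setup. */
int
kd_init_model(struct kd_init_dev *kd, unsigned long nmemb)
{
	uintptr_t *buf;
	size_t size;

	if (kd->kd_state != STATE_NONE)
		return (EBUSY);		/* the buffer may only be set once */

	/* Round up to whole pages; the kernel allocation is zeroed. */
	size = (nmemb * sizeof(uintptr_t) + PAGE_SIZE_MODEL - 1) &
	    ~(PAGE_SIZE_MODEL - 1);
	if ((buf = calloc(1, size)) == NULL)
		return (ENOMEM);

	kd->kd_buf = buf;
	kd->kd_nmemb = nmemb - 1;	/* slot 0 is the record counter */
	kd->kd_size = size;
	kd->kd_state = STATE_READY;
	return (0);
}
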
551 kd_free(struct kcov_dev *kd)
557 kr = kd->kd_kr;
559 kcov_remote_detach(kd, kr);
561 if (kd->kd_buf != NULL) {
563 km_free(kd->kd_buf, kd->kd_size, &kv_any, &kp_zero);
566 free(kd, M_SUBPROC, sizeof(*kd));
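kd_free() (lines 551-566) tears down in dependency order: detach from any remote first (so nothing can route coverage into the buffer), then release the buffer, then the descriptor itself. Likely shape, reconstructed around the matched lines:

static void
kd_free(struct kcov_dev *kd)
{
	struct kcov_remote *kr;

	kr = kd->kd_kr;
	if (kr != NULL)			/* unlink before freeing the buffer */
		kcov_remote_detach(kd, kr);
	if (kd->kd_buf != NULL)
		km_free(kd->kd_buf, kd->kd_size, &kv_any, &kp_zero);
	free(kd, M_SUBPROC, sizeof(*kd));
}
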
573 struct kcov_dev *kd;
584 kd = ci->ci_curproc->p_kd;
585 if (__predict_true(kd == NULL) || kd->kd_mode != mode)
596 if (inintr(ci) && kd->kd_intr == 0)
599 return (kd);
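kd_curproc() (lines 573-599) is the fast-path guard run on every instrumented edge: bail unless the current process has a descriptor enabled in the requested mode, and (line 596) suppress interrupt-context events unless the descriptor opted in via kd_intr. The __predict_true() on line 585 keeps the common no-kcov case cheap. Its shape, reconstructed around the matched lines and omitting any guards not visible in the listing:

static struct kcov_dev *
kd_curproc(int mode)
{
	struct cpu_info *ci = curcpu();
	struct kcov_dev *kd;

	kd = ci->ci_curproc->p_kd;
	if (__predict_true(kd == NULL) || kd->kd_mode != mode)
		return (NULL);
	/* Tracing in interrupt context is opt-in. */
	if (inintr(ci) && kd->kd_intr == 0)
		return (NULL);
	return (kd);
}
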
622 kd_claim(struct kcov_dev *kd, int stride, int nmemb)
626 idx = kd->kd_buf[0];
628 if (stride * (idx + nmemb) > kd->kd_nmemb)
631 was = atomic_cas_ulong(&kd->kd_buf[0], idx, idx + nmemb);
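kd_claim() (lines 622-631) reserves buffer slots without a lock: slot 0 is the record count, and a compare-and-swap loop bumps it by nmemb records of the given stride, returning the first claimed slot index. A successful claim is always >= 1, so callers can treat a return of 0 as "buffer full, drop the event" (see line 157). A self-contained C11 model of the same reservation loop:

#include <stdatomic.h>

/* Model device: slot 0 of kd_buf counts records, kd_nmemb excludes it. */
struct kd_claim_dev {
	_Atomic unsigned long	*kd_buf;
	unsigned long		 kd_nmemb;
};

/* Lock-free slot reservation, modelled on lines 622-631. */
unsigned long
kd_claim_model(struct kd_claim_dev *kd, int stride, int nmemb)
{
	unsigned long idx, want;

	idx = atomic_load(&kd->kd_buf[0]);
	for (;;) {
		if (stride * (idx + nmemb) > kd->kd_nmemb)
			return (0);	/* buffer full: drop the record */
		want = idx;
		if (atomic_compare_exchange_weak(&kd->kd_buf[0], &want,
		    idx + nmemb))
			return (idx * stride + 1);
		idx = want;		/* lost the race; retry from there */
	}
}
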
643 struct kcov_dev *kd;
651 kd = kr->kr_kd;
652 if (kd == NULL || kd->kd_state != KCOV_STATE_TRACE)
665 kc->kc_kd.kd_mode = kd->kd_mode;
668 kd = &kc->kc_kd;
670 kd->kd_buf[0] = 0;
675 p->p_kd = kd;
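Lines 643-675 come from the remote-enter path: a subsystem entering a remote section looks up its kcov_remote, and only if a tracer has attached (kr_kd) and enabled tracing does coverage get redirected. In interrupt context a per-CPU shadow device (kc_kd) inherits the tracer's mode, so interrupt coverage does not race the process buffer. A loose outline; the placement of the buffer reset relative to the interrupt branch, and the kr_lookup() helper, are inferred rather than shown:

/* Loose outline of the remote-enter path. */
kr = kr_lookup(subsystem, id);		/* hypothetical lookup helper */
if (kr == NULL)
	return;
kd = kr->kr_kd;
if (kd == NULL || kd->kd_state != KCOV_STATE_TRACE)
	return;				/* no tracer attached and enabled */
if (inintr(ci)) {
	/* Interrupt context: trace into the per-CPU shadow device. */
	kc->kc_kd.kd_mode = kd->kd_mode;
	kd = &kc->kc_kd;
}
kd->kd_buf[0] = 0;			/* start a fresh record run */
p->p_kd = kd;				/* the hooks now see this device */
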
781 kcov_remote_attach(struct kcov_dev *kd, struct kio_remote_attach *arg)
787 if (kd->kd_state != KCOV_STATE_READY)
800 kr->kr_kd = kd;
801 kd->kd_kr = kr;
806 kcov_remote_detach(struct kcov_dev *kd, struct kcov_remote *kr)
810 KASSERT(kd == kr->kr_kd);
816 kd->kd_kr = NULL;
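The last two fragments (lines 781-816) show remote attach and detach as symmetric pointer linking: attach requires a READY device and cross-links kr->kr_kd and kd->kd_kr; detach asserts the pairing and clears it. A minimal self-contained model (clearing kr_kd on detach is an assumption, since only the kd_kr store appears in the listing):

#include <assert.h>
#include <stddef.h>

struct kr_model;
struct kd_remote_dev {
	int		 kd_state;	/* stand-in for KCOV_STATE_* */
	struct kr_model	*kd_kr;
};
struct kr_model {
	struct kd_remote_dev *kr_kd;
};

enum { R_STATE_NONE, R_STATE_READY };

/* Model of the cross-linking done by kcov_remote_attach(). */
int
remote_attach_model(struct kd_remote_dev *kd, struct kr_model *kr)
{
	if (kd->kd_state != R_STATE_READY)
		return (-1);		/* the kernel returns an errno here */
	kr->kr_kd = kd;
	kd->kd_kr = kr;
	return (0);
}

/* Model of kcov_remote_detach(): assert and undo the pairing. */
void
remote_detach_model(struct kd_remote_dev *kd, struct kr_model *kr)
{
	assert(kd == kr->kr_kd);
	kr->kr_kd = NULL;		/* assumption; not in the listing */
	kd->kd_kr = NULL;
}
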