Lines matching defs:napi — NAPI-related definitions in FreeBSD's LinuxKPI compatibility code. The leading numbers are the matching lines' positions in the source file; gaps between them are elided context.
103 linuxkpi_napi_schedule_prep(struct napi_struct *napi)
107 NAPI_TRACE(napi);
111 old = READ_ONCE(napi->state);
115 NAPI_TRACE(napi);
125 } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);
127 NAPI_TRACE(napi);
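The fragment above (lines 111-125) is a lock-free read/modify/compare-and-swap cycle over napi->state built on atomic_cmpset_acq_long(9). Below is a minimal userspace sketch of the same retry pattern using C11 atomics. The FLAG_* names and bit positions are illustrative stand-ins for the LKPI_NAPI_FLAG_* bits, and the disable-pending veto and lost-race bookkeeping are inferred from the flag names visible elsewhere in the listing, so treat this as a sketch of the pattern, not the kernel's exact logic.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative stand-ins for the LKPI_NAPI_FLAG_* bits. */
    #define FLAG_IS_SCHEDULED       (1UL << 0)
    #define FLAG_DISABLE_PENDING    (1UL << 1)
    #define FLAG_LOST_RACE          (1UL << 2)

    static bool
    schedule_prep(atomic_ulong *state)
    {
            unsigned long old, new;

            do {
                    old = atomic_load(state);
                    /* A pending disable vetoes any new schedule. */
                    if (old & FLAG_DISABLE_PENDING)
                            return (false);
                    new = old | FLAG_IS_SCHEDULED;
                    /* Already scheduled?  Record the lost race. */
                    if (old & FLAG_IS_SCHEDULED)
                            new |= FLAG_LOST_RACE;
            } while (!atomic_compare_exchange_weak(state, &old, new));

            /* True only for the caller that set the bit first. */
            return ((old & FLAG_IS_SCHEDULED) == 0);
    }

The weak compare-exchange may fail spuriously, but the surrounding do/while absorbs that exactly as the kernel's loop absorbs a lost CAS.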
132 lkpi___napi_schedule_dd(struct napi_struct *napi)
139 NAPI_TRACE2D(napi, rc);
140 if (napi->poll != NULL)
141 rc = napi->poll(napi, napi->budget);
142 napi->rx_count += rc;
146 if (rc >= napi->budget)
150 if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state))
154 new = old = READ_ONCE(napi->state);
157 } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);
159 NAPI_TRACE2D(napi, rc);
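lkpi___napi_schedule_dd() is the direct-dispatch variant: judging from lines 140-157, it keeps invoking the driver's poll callback while the full budget was consumed or a lost scheduling race is flagged, and only then clears the scheduled state in one atomic step. A sketch of that control flow, reusing the FLAG_* stand-ins and C11 atomics from the previous sketch:

    struct fake_napi {
            atomic_ulong    state;
            int             budget;
            int             rx_count;
            int             (*poll)(struct fake_napi *, int);
    };

    static void
    schedule_direct(struct fake_napi *n)
    {
            unsigned long old, new;
            int rc;

    again:
            rc = (n->poll != NULL) ? n->poll(n, n->budget) : 0;
            n->rx_count += rc;
            /* Whole budget consumed: assume more packets are queued. */
            if (rc >= n->budget)
                    goto again;
            /* Another context tried to schedule while we polled. */
            if (atomic_load(&n->state) & FLAG_LOST_RACE)
                    goto again;
            /* Drop both flags in one atomic step before going idle. */
            do {
                    new = old = atomic_load(&n->state);
                    new &= ~(FLAG_LOST_RACE | FLAG_IS_SCHEDULED);
            } while (!atomic_compare_exchange_weak(&n->state, &old, new));
    }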
163 linuxkpi___napi_schedule(struct napi_struct *napi)
167 NAPI_TRACE(napi);
168 if (test_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state)) {
169 clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state);
170 clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state);
171 NAPI_TRACE(napi);
176 lkpi___napi_schedule_dd(napi);
178 rc = taskqueue_enqueue(napi->dev->napi_tq, &napi->napi_task);
179 NAPI_TRACE2D(napi, rc);
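When direct dispatch is not taken, line 178 hands the poll off to a per-device task queue instead of running it in the caller's context. Below is a minimal userspace analog of that hand-off, with a pthread worker standing in for taskqueue(9); it is purely illustrative, and coalesces repeated schedules the way an already-pending task would.

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t  q_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t   q_cv = PTHREAD_COND_INITIALIZER;
    static bool             q_pending;

    static void
    enqueue_poll_task(void)
    {
            pthread_mutex_lock(&q_mtx);
            q_pending = true;       /* repeated schedules coalesce here */
            pthread_cond_signal(&q_cv);
            pthread_mutex_unlock(&q_mtx);
    }

    static void *
    poll_worker(void *arg)
    {
            (void)arg;
            for (;;) {
                    pthread_mutex_lock(&q_mtx);
                    while (!q_pending)
                            pthread_cond_wait(&q_cv, &q_mtx);
                    q_pending = false;
                    pthread_mutex_unlock(&q_mtx);
                    /* ... invoke the driver's poll callback here ... */
            }
    }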
188 linuxkpi_napi_schedule(struct napi_struct *napi)
191 NAPI_TRACE(napi);
197 if (napi_schedule_prep(napi)) {
198 __napi_schedule(napi);
206 linuxkpi_napi_reschedule(struct napi_struct *napi)
209 NAPI_TRACE(napi);
212 if (napi_schedule_prep(napi))
213 __napi_schedule(napi);
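napi_schedule() and napi_reschedule() are thin wrappers around the same two-phase pattern: napi_schedule_prep() atomically claims the scheduled bit, and only the winner calls __napi_schedule(). A driver's-eye sketch, reusing schedule_prep() and enqueue_poll_task() from the sketches above:

    static void
    fake_rx_interrupt(atomic_ulong *state)
    {
            /*
             * Only the caller that flips the scheduled bit enqueues
             * the poll task, so concurrent interrupts collapse into
             * a single queued poll run.
             */
            if (schedule_prep(state))
                    enqueue_poll_task();
    }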
217 linuxkpi_napi_complete_done(struct napi_struct *napi, int ret)
221 NAPI_TRACE(napi);
226 new = old = READ_ONCE(napi->state);
235 } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);
237 NAPI_TRACE(napi);
241 __napi_schedule(napi);
249 linuxkpi_napi_complete(struct napi_struct *napi)
252 NAPI_TRACE(napi);
253 return (napi_complete_done(napi, 0));
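napi_complete_done() undoes the schedule with the same CAS loop, but the __napi_schedule() call at line 241 shows it re-arms itself when another schedule attempt raced in while poll was running. A hedged sketch of that completion rule, reusing the earlier stand-ins; the detail of keeping the scheduled bit across the re-arm is inferred from the elided lines, not visible in the listing:

    static bool
    complete_done(atomic_ulong *state)
    {
            unsigned long old, new;
            bool lost;

            do {
                    new = old = atomic_load(state);
                    lost = (old & FLAG_LOST_RACE) != 0;
                    new &= ~FLAG_LOST_RACE;
                    /* Keep the scheduled bit if we must run again. */
                    if (!lost)
                            new &= ~FLAG_IS_SCHEDULED;
            } while (!atomic_compare_exchange_weak(state, &old, new));

            if (lost) {
                    enqueue_poll_task();    /* __napi_schedule() analog */
                    return (false);
            }
            return (true);
    }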
257 linuxkpi_napi_disable(struct napi_struct *napi)
259 NAPI_TRACE(napi);
260 set_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state);
261 while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state))
263 clear_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state);
267 linuxkpi_napi_enable(struct napi_struct *napi)
270 NAPI_TRACE(napi);
271 KASSERT(!test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state),
272 ("%s: enabling napi %p already scheduled\n", __func__, napi));
275 clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state);
279 linuxkpi_napi_synchronize(struct napi_struct *napi)
281 NAPI_TRACE(napi);
283 /* Check & sleep while a napi is scheduled. */
284 while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state))
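napi_disable(), napi_enable() and napi_synchronize() drive the lifecycle purely through those state bits: disable raises DISABLE_PENDING so the _prep() path refuses new work, then waits for any in-flight poll to drop IS_SCHEDULED. The kernel sleeps between checks on the elided lines; this userspace sketch merely yields:

    #include <sched.h>

    static void
    fake_napi_disable(atomic_ulong *state)
    {
            /* Block new schedules first ... */
            atomic_fetch_or(state, FLAG_DISABLE_PENDING);
            /* ... then wait out any poll already running. */
            while (atomic_load(state) & FLAG_IS_SCHEDULED)
                    sched_yield();          /* kernel: sleep, not spin */
            atomic_fetch_and(state, ~FLAG_DISABLE_PENDING);
    }

    static void
    fake_napi_synchronize(atomic_ulong *state)
    {
            /* Wait-only variant: does not block future schedules. */
            while (atomic_load(state) & FLAG_IS_SCHEDULED)
                    sched_yield();
    }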
296 struct napi_struct *napi;
299 KASSERT(ctx != NULL, ("%s: napi %p, pending %d\n",
301 napi = ctx;
302 KASSERT(napi->poll != NULL, ("%s: napi %p poll is NULL\n",
303 __func__, napi));
305 NAPI_TRACE_TASK(napi, pending, napi->budget);
306 count = napi->poll(napi, napi->budget);
307 napi->rx_count += count;
308 NAPI_TRACE_TASK(napi, pending, count);
315 if (count >= napi->budget) {
322 __napi_schedule(napi);
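lkpi_napi_task() is the taskqueue handler itself: run the driver poll once and, if the entire budget was consumed (line 315), queue another pass. Note that it calls __napi_schedule() directly at line 322, presumably because the completion path was not taken and the task therefore still owns the scheduled bit. A sketch, reusing the earlier helpers:

    static void
    fake_napi_task(struct fake_napi *n)
    {
            int count;

            count = n->poll(n, n->budget);
            n->rx_count += count;
            if (count >= n->budget) {
                    /*
                     * poll() did not complete; the scheduled bit is
                     * still ours, so enqueue directly without going
                     * through schedule_prep() again.
                     */
                    enqueue_poll_task();
            }
    }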
329 linuxkpi_netif_napi_add(struct net_device *ndev, struct napi_struct *napi,
333 napi->dev = ndev;
334 napi->poll = napi_poll;
335 napi->budget = NAPI_POLL_WEIGHT;
337 INIT_LIST_HEAD(&napi->rx_list);
338 napi->rx_count = 0;
340 TASK_INIT(&napi->napi_task, 0, lkpi_napi_task, napi);
343 TAILQ_INSERT_TAIL(&ndev->napi_head, napi, entry);
347 clear_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state);
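netif_napi_add() only wires the instance together: poll callback, default budget, counters, the task, and membership on the device's TAILQ, before clearing the shutdown bit. A self-contained userspace analog of the list wiring using FreeBSD's <sys/queue.h>; the struct and function names are illustrative, and 64 is a stand-in for NAPI_POLL_WEIGHT:

    #include <sys/queue.h>

    struct snapi {
            TAILQ_ENTRY(snapi) entry;
            int     (*poll)(struct snapi *, int);
            int     budget;
            int     rx_count;
    };

    TAILQ_HEAD(snapi_head, snapi);

    static void
    fake_netif_napi_add(struct snapi_head *head, struct snapi *n,
        int (*poll)(struct snapi *, int))
    {
            n->poll = poll;
            n->budget = 64;         /* stand-in for NAPI_POLL_WEIGHT */
            n->rx_count = 0;
            TAILQ_INSERT_TAIL(head, n, entry);
    }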
351 lkpi_netif_napi_del_locked(struct napi_struct *napi)
355 ndev = napi->dev;
358 set_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state);
359 TAILQ_REMOVE(&ndev->napi_head, napi, entry);
360 while (taskqueue_cancel(ndev->napi_tq, &napi->napi_task, NULL) != 0)
361 taskqueue_drain(ndev->napi_tq, &napi->napi_task);
365 linuxkpi_netif_napi_del(struct napi_struct *napi)
369 ndev = napi->dev;
371 lkpi_netif_napi_del_locked(napi);
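Teardown ordering matters here: line 358 sets the SHUTDOWN bit first, so the schedule path (lines 168-171) refuses to re-queue the task, and only then is the task unlinked and cancelled. The cancel/drain loop at lines 360-361 is the standard taskqueue(9) idiom, annotated below: taskqueue_cancel() returns non-zero when the task is already running and thus cannot be cancelled, in which case the code drains (waits for the task to finish) and retries the cancel in case the task re-enqueued itself.

    while (taskqueue_cancel(ndev->napi_tq, &napi->napi_task, NULL) != 0)
            /* Task is running (or re-queued itself): wait, retry. */
            taskqueue_drain(ndev->napi_tq, &napi->napi_task);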
392 "ndev napi taskq");
421 struct napi_struct *napi, *temp;
424 TAILQ_FOREACH_SAFE(napi, &ndev->napi_head, entry, temp) {
425 lkpi_netif_napi_del_locked(napi);
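The final fragment detaches every NAPI instance during device teardown. TAILQ_FOREACH_SAFE is required because lkpi_netif_napi_del_locked() unlinks the current element; the _SAFE variant caches the next pointer before the body runs. A self-contained demonstration of the same idiom with FreeBSD's <sys/queue.h>:

    #include <sys/queue.h>
    #include <stdlib.h>

    struct node {
            TAILQ_ENTRY(node) entry;
    };
    TAILQ_HEAD(node_head, node);

    static void
    drain_all(struct node_head *head)
    {
            struct node *n, *tmp;

            /*
             * Plain TAILQ_FOREACH() would read the next pointer from
             * an element the body has already unlinked and freed.
             */
            TAILQ_FOREACH_SAFE(n, head, entry, tmp) {
                    TAILQ_REMOVE(head, n, entry);
                    free(n);
            }
    }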