Lines matching full:rx (gve DQO receive path, drivers/net/ethernet/google/gve/gve_rx_dqo.c)
19 static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx) in gve_rx_free_hdr_bufs() argument
22 int buf_count = rx->dqo.bufq.mask + 1; in gve_rx_free_hdr_bufs()
24 if (rx->dqo.hdr_bufs.data) { in gve_rx_free_hdr_bufs()
26 rx->dqo.hdr_bufs.data, rx->dqo.hdr_bufs.addr); in gve_rx_free_hdr_bufs()
27 rx->dqo.hdr_bufs.data = NULL; in gve_rx_free_hdr_bufs()
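
Only the lines containing "rx" survive the match, so the DMA free call itself is elided here. Read together with the dma_alloc_coherent() in gve_rx_alloc_hdr_bufs() further down (same header_buf_size * buf_count sizing), the helper plausibly looks like the sketch below; the &priv->pdev->dev device handle is an assumption:

static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
{
        struct device *hdev = &priv->pdev->dev;   /* assumed DMA device handle */
        int buf_count = rx->dqo.bufq.mask + 1;    /* one header buffer per bufq slot */

        if (rx->dqo.hdr_bufs.data) {
                /* mirror of the dma_alloc_coherent() in gve_rx_alloc_hdr_bufs() */
                dma_free_coherent(hdev, priv->header_buf_size * buf_count,
                                  rx->dqo.hdr_bufs.data, rx->dqo.hdr_bufs.addr);
                rx->dqo.hdr_bufs.data = NULL;
        }
}
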
31 static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx, in gve_rx_init_ring_state_dqo() argument
38 rx->dqo.bufq.mask = buffer_queue_slots - 1; in gve_rx_init_ring_state_dqo()
39 rx->dqo.bufq.head = 0; in gve_rx_init_ring_state_dqo()
40 rx->dqo.bufq.tail = 0; in gve_rx_init_ring_state_dqo()
43 rx->dqo.complq.num_free_slots = completion_queue_slots; in gve_rx_init_ring_state_dqo()
44 rx->dqo.complq.mask = completion_queue_slots - 1; in gve_rx_init_ring_state_dqo()
45 rx->dqo.complq.cur_gen_bit = 0; in gve_rx_init_ring_state_dqo()
46 rx->dqo.complq.head = 0; in gve_rx_init_ring_state_dqo()
48 /* Set RX SKB context */ in gve_rx_init_ring_state_dqo()
49 rx->ctx.skb_head = NULL; in gve_rx_init_ring_state_dqo()
50 rx->ctx.skb_tail = NULL; in gve_rx_init_ring_state_dqo()
53 if (rx->dqo.buf_states) { in gve_rx_init_ring_state_dqo()
54 for (i = 0; i < rx->dqo.num_buf_states - 1; i++) in gve_rx_init_ring_state_dqo()
55 rx->dqo.buf_states[i].next = i + 1; in gve_rx_init_ring_state_dqo()
56 rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1; in gve_rx_init_ring_state_dqo()
59 rx->dqo.free_buf_states = 0; in gve_rx_init_ring_state_dqo()
60 rx->dqo.recycled_buf_states.head = -1; in gve_rx_init_ring_state_dqo()
61 rx->dqo.recycled_buf_states.tail = -1; in gve_rx_init_ring_state_dqo()
62 rx->dqo.used_buf_states.head = -1; in gve_rx_init_ring_state_dqo()
63 rx->dqo.used_buf_states.tail = -1; in gve_rx_init_ring_state_dqo()
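
The init above implies two conventions: both queues are sized to a power of two, so the mask doubles as the wrap operator, and buf_states[] is threaded into an index-based free list terminated by -1. A minimal sketch of how such state is typically consumed; ring_next() and buf_state_pop() are hypothetical helpers, not the driver's own accessors, and the index type is assumed:

/* Advance an index on a power-of-two ring (slots == mask + 1). */
static inline u32 ring_next(u32 idx, u32 mask)
{
        return (idx + 1) & mask;
}

/* Pop one entry from the free list built above: free_buf_states holds the
 * head index, each .next points at the following entry, -1 terminates. */
static struct gve_rx_buf_state_dqo *buf_state_pop(struct gve_rx_ring *rx)
{
        s16 idx = rx->dqo.free_buf_states;      /* index type assumed */

        if (idx == -1)
                return NULL;
        rx->dqo.free_buf_states = rx->dqo.buf_states[idx].next;
        return &rx->dqo.buf_states[idx];
}
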
68 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_reset_ring_dqo() local
76 if (rx->dqo.bufq.desc_ring) { in gve_rx_reset_ring_dqo()
77 size = sizeof(rx->dqo.bufq.desc_ring[0]) * in gve_rx_reset_ring_dqo()
79 memset(rx->dqo.bufq.desc_ring, 0, size); in gve_rx_reset_ring_dqo()
83 if (rx->dqo.complq.desc_ring) { in gve_rx_reset_ring_dqo()
84 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_reset_ring_dqo()
86 memset(rx->dqo.complq.desc_ring, 0, size); in gve_rx_reset_ring_dqo()
90 if (rx->q_resources) in gve_rx_reset_ring_dqo()
91 memset(rx->q_resources, 0, sizeof(*rx->q_resources)); in gve_rx_reset_ring_dqo()
94 if (rx->dqo.buf_states) { in gve_rx_reset_ring_dqo()
95 for (i = 0; i < rx->dqo.num_buf_states; i++) { in gve_rx_reset_ring_dqo()
96 struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; in gve_rx_reset_ring_dqo()
98 if (rx->dqo.page_pool) in gve_rx_reset_ring_dqo()
99 gve_free_to_page_pool(rx, bs, false); in gve_rx_reset_ring_dqo()
105 gve_rx_init_ring_state_dqo(rx, buffer_queue_slots, in gve_rx_reset_ring_dqo()
112 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_stop_ring_dqo() local
117 if (rx->dqo.page_pool) in gve_rx_stop_ring_dqo()
118 page_pool_disable_direct_recycling(rx->dqo.page_pool); in gve_rx_stop_ring_dqo()
124 void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_rx_free_ring_dqo() argument
130 int idx = rx->q_num; in gve_rx_free_ring_dqo()
135 completion_queue_slots = rx->dqo.complq.mask + 1; in gve_rx_free_ring_dqo()
136 buffer_queue_slots = rx->dqo.bufq.mask + 1; in gve_rx_free_ring_dqo()
138 if (rx->q_resources) { in gve_rx_free_ring_dqo()
139 dma_free_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_free_ring_dqo()
140 rx->q_resources, rx->q_resources_bus); in gve_rx_free_ring_dqo()
141 rx->q_resources = NULL; in gve_rx_free_ring_dqo()
144 for (i = 0; i < rx->dqo.num_buf_states; i++) { in gve_rx_free_ring_dqo()
145 struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; in gve_rx_free_ring_dqo()
147 if (rx->dqo.page_pool) in gve_rx_free_ring_dqo()
148 gve_free_to_page_pool(rx, bs, false); in gve_rx_free_ring_dqo()
153 if (rx->dqo.qpl) { in gve_rx_free_ring_dqo()
154 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num); in gve_rx_free_ring_dqo()
155 gve_free_queue_page_list(priv, rx->dqo.qpl, qpl_id); in gve_rx_free_ring_dqo()
156 rx->dqo.qpl = NULL; in gve_rx_free_ring_dqo()
159 if (rx->dqo.bufq.desc_ring) { in gve_rx_free_ring_dqo()
160 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; in gve_rx_free_ring_dqo()
161 dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring, in gve_rx_free_ring_dqo()
162 rx->dqo.bufq.bus); in gve_rx_free_ring_dqo()
163 rx->dqo.bufq.desc_ring = NULL; in gve_rx_free_ring_dqo()
166 if (rx->dqo.complq.desc_ring) { in gve_rx_free_ring_dqo()
167 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_free_ring_dqo()
169 dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring, in gve_rx_free_ring_dqo()
170 rx->dqo.complq.bus); in gve_rx_free_ring_dqo()
171 rx->dqo.complq.desc_ring = NULL; in gve_rx_free_ring_dqo()
174 kvfree(rx->dqo.buf_states); in gve_rx_free_ring_dqo()
175 rx->dqo.buf_states = NULL; in gve_rx_free_ring_dqo()
177 if (rx->dqo.page_pool) { in gve_rx_free_ring_dqo()
178 page_pool_destroy(rx->dqo.page_pool); in gve_rx_free_ring_dqo()
179 rx->dqo.page_pool = NULL; in gve_rx_free_ring_dqo()
182 gve_rx_free_hdr_bufs(priv, rx); in gve_rx_free_ring_dqo()
184 netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); in gve_rx_free_ring_dqo()
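
In the completion-queue branch of the free path, the continuation line of the size computation does not contain "rx" and is dropped by the matcher. Since completion_queue_slots is recovered earlier as complq.mask + 1, that block plausibly reads as follows (a sketch, not the verbatim source):

        if (rx->dqo.complq.desc_ring) {
                size = sizeof(rx->dqo.complq.desc_ring[0]) *
                        completion_queue_slots;
                dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring,
                                  rx->dqo.complq.bus);
                rx->dqo.complq.desc_ring = NULL;
        }
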
187 static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_rx_alloc_hdr_bufs() argument
192 rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count, in gve_rx_alloc_hdr_bufs()
193 &rx->dqo.hdr_bufs.addr, GFP_KERNEL); in gve_rx_alloc_hdr_bufs()
194 if (!rx->dqo.hdr_bufs.data) in gve_rx_alloc_hdr_bufs()
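
The missing tail of this allocator is almost certainly the usual -ENOMEM pattern; a sketch, with the buf_count parameter type and the device handle assumed:

static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx,
                                 u32 buf_count)
{
        struct device *hdev = &priv->pdev->dev;   /* assumed DMA device handle */

        /* One header buffer of header_buf_size bytes per buffer-queue slot. */
        rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev,
                                                   priv->header_buf_size * buf_count,
                                                   &rx->dqo.hdr_bufs.addr,
                                                   GFP_KERNEL);
        if (!rx->dqo.hdr_bufs.data)
                return -ENOMEM;

        return 0;
}
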
210 struct gve_rx_ring *rx, in gve_rx_alloc_ring_dqo() argument
222 netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n"); in gve_rx_alloc_ring_dqo()
224 memset(rx, 0, sizeof(*rx)); in gve_rx_alloc_ring_dqo()
225 rx->gve = priv; in gve_rx_alloc_ring_dqo()
226 rx->q_num = idx; in gve_rx_alloc_ring_dqo()
228 rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots : in gve_rx_alloc_ring_dqo()
230 rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, in gve_rx_alloc_ring_dqo()
231 sizeof(rx->dqo.buf_states[0]), in gve_rx_alloc_ring_dqo()
233 if (!rx->dqo.buf_states) in gve_rx_alloc_ring_dqo()
238 if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots)) in gve_rx_alloc_ring_dqo()
241 /* Allocate RX completion queue */ in gve_rx_alloc_ring_dqo()
242 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_alloc_ring_dqo()
244 rx->dqo.complq.desc_ring = in gve_rx_alloc_ring_dqo()
245 dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
246 if (!rx->dqo.complq.desc_ring) in gve_rx_alloc_ring_dqo()
249 /* Allocate RX buffer queue */ in gve_rx_alloc_ring_dqo()
250 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; in gve_rx_alloc_ring_dqo()
251 rx->dqo.bufq.desc_ring = in gve_rx_alloc_ring_dqo()
252 dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
253 if (!rx->dqo.bufq.desc_ring) in gve_rx_alloc_ring_dqo()
257 pool = gve_rx_create_page_pool(priv, rx); in gve_rx_alloc_ring_dqo()
261 rx->dqo.page_pool = pool; in gve_rx_alloc_ring_dqo()
263 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num); in gve_rx_alloc_ring_dqo()
266 rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id, in gve_rx_alloc_ring_dqo()
268 if (!rx->dqo.qpl) in gve_rx_alloc_ring_dqo()
270 rx->dqo.next_qpl_page_idx = 0; in gve_rx_alloc_ring_dqo()
273 rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_alloc_ring_dqo()
274 &rx->q_resources_bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
275 if (!rx->q_resources) in gve_rx_alloc_ring_dqo()
278 gve_rx_init_ring_state_dqo(rx, buffer_queue_slots, in gve_rx_alloc_ring_dqo()
284 gve_rx_free_ring_dqo(priv, rx, cfg); in gve_rx_alloc_ring_dqo()
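
The allocation path ends with the common kernel unwind idiom: every failure jumps to one error label and the free routine cleans up whatever was set, which works because gve_rx_free_ring_dqo() NULL-checks and re-NULLs each resource. A sketch of the tail of the function; the label name and return value are assumptions:

        rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
                                             &rx->q_resources_bus, GFP_KERNEL);
        if (!rx->q_resources)
                goto err;

        gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
                                   completion_queue_slots);
        return 0;

err:
        /* Safe on partially built rings: everything freed here is NULL-checked. */
        gve_rx_free_ring_dqo(priv, rx, cfg);
        return -ENOMEM;
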
290 const struct gve_rx_ring *rx = &priv->rx[queue_idx]; in gve_rx_write_doorbell_dqo() local
291 u64 index = be32_to_cpu(rx->q_resources->db_index); in gve_rx_write_doorbell_dqo()
293 iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]); in gve_rx_write_doorbell_dqo()
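
The doorbell helper is nearly complete in the fragments: it publishes the buffer-queue tail to the per-ring doorbell register whose index the device reported in q_resources. Filled out, it plausibly is:

static void gve_rx_write_doorbell_dqo(struct gve_priv *priv, int queue_idx)
{
        const struct gve_rx_ring *rx = &priv->rx[queue_idx];
        u64 index = be32_to_cpu(rx->q_resources->db_index);

        /* Tell the NIC how far the buffer queue has been filled. */
        iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
}
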
299 struct gve_rx_ring *rx; in gve_rx_alloc_rings_dqo() local
303 rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring), in gve_rx_alloc_rings_dqo()
305 if (!rx) in gve_rx_alloc_rings_dqo()
309 err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i); in gve_rx_alloc_rings_dqo()
312 "Failed to alloc rx ring=%d: err=%d\n", in gve_rx_alloc_rings_dqo()
318 cfg->rx = rx; in gve_rx_alloc_rings_dqo()
323 gve_rx_free_ring_dqo(priv, &rx[i], cfg); in gve_rx_alloc_rings_dqo()
324 kvfree(rx); in gve_rx_alloc_rings_dqo()
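
The per-queue loop follows the standard allocate-then-unwind shape: on failure, free only the rings [0, i) that were successfully set up, then drop the array. A sketch consistent with the fragments; the loop bound field, the netif_err() call, and the error label are assumptions:

        rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
                      GFP_KERNEL);
        if (!rx)
                return -ENOMEM;

        for (i = 0; i < cfg->qcfg->num_queues; i++) {     /* bound assumed */
                err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
                if (err) {
                        netif_err(priv, drv, priv->dev,
                                  "Failed to alloc rx ring=%d: err=%d\n", i, err);
                        goto err;
                }
        }

        cfg->rx = rx;
        return 0;

err:
        for (i--; i >= 0; i--)
                gve_rx_free_ring_dqo(priv, &rx[i], cfg);
        kvfree(rx);
        return err;
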
331 struct gve_rx_ring *rx = cfg->rx; in gve_rx_free_rings_dqo() local
334 if (!rx) in gve_rx_free_rings_dqo()
338 gve_rx_free_ring_dqo(priv, &rx[i], cfg); in gve_rx_free_rings_dqo()
340 kvfree(rx); in gve_rx_free_rings_dqo()
341 cfg->rx = NULL; in gve_rx_free_rings_dqo()
344 void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) in gve_rx_post_buffers_dqo() argument
346 struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; in gve_rx_post_buffers_dqo()
347 struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; in gve_rx_post_buffers_dqo()
348 struct gve_priv *priv = rx->gve; in gve_rx_post_buffers_dqo()
360 if (unlikely(gve_alloc_buffer(rx, desc))) { in gve_rx_post_buffers_dqo()
361 u64_stats_update_begin(&rx->statss); in gve_rx_post_buffers_dqo()
362 rx->rx_buf_alloc_fail++; in gve_rx_post_buffers_dqo()
363 u64_stats_update_end(&rx->statss); in gve_rx_post_buffers_dqo()
367 if (rx->dqo.hdr_bufs.data) in gve_rx_post_buffers_dqo()
369 cpu_to_le64(rx->dqo.hdr_bufs.addr + in gve_rx_post_buffers_dqo()
377 gve_rx_write_doorbell_dqo(priv, rx->q_num); in gve_rx_post_buffers_dqo()
380 rx->fill_cnt += num_posted; in gve_rx_post_buffers_dqo()
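
The posting loop fills as many buffer-queue slots as both queues can absorb: it computes the free span from head/tail under the mask, caps it by the completion queue's free slots, attaches a data buffer per descriptor (plus a header-buffer address when header split is enabled), and rings the doorbell in batches. A condensed sketch; the header_buf_addr descriptor field is assumed and the doorbell batching constant is purely illustrative:

void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
{
        struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
        struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;
        struct gve_priv *priv = rx->gve;
        u32 num_full_slots, num_avail_slots, num_posted = 0;

        num_full_slots = (bufq->tail - bufq->head) & bufq->mask;
        num_avail_slots = bufq->mask - num_full_slots;
        num_avail_slots = min_t(u32, num_avail_slots, complq->num_free_slots);

        while (num_posted < num_avail_slots) {
                if (unlikely(gve_alloc_buffer(rx, &bufq->desc_ring[bufq->tail]))) {
                        u64_stats_update_begin(&rx->statss);
                        rx->rx_buf_alloc_fail++;
                        u64_stats_update_end(&rx->statss);
                        break;
                }

                if (rx->dqo.hdr_bufs.data)      /* header split enabled */
                        bufq->desc_ring[bufq->tail].header_buf_addr =
                                cpu_to_le64(rx->dqo.hdr_bufs.addr +
                                            priv->header_buf_size * bufq->tail);

                bufq->tail = (bufq->tail + 1) & bufq->mask;
                complq->num_free_slots--;
                num_posted++;

                /* Batch the doorbell writes; 16 is illustrative only. */
                if ((bufq->tail & (16 - 1)) == 0)
                        gve_rx_write_doorbell_dqo(priv, rx->q_num);
        }

        rx->fill_cnt += num_posted;
}
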
431 static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx) in gve_rx_free_skb() argument
433 if (!rx->ctx.skb_head) in gve_rx_free_skb()
436 if (rx->ctx.skb_head == napi->skb) in gve_rx_free_skb()
438 dev_kfree_skb_any(rx->ctx.skb_head); in gve_rx_free_skb()
439 rx->ctx.skb_head = NULL; in gve_rx_free_skb()
440 rx->ctx.skb_tail = NULL; in gve_rx_free_skb()
443 static bool gve_rx_should_trigger_copy_ondemand(struct gve_rx_ring *rx) in gve_rx_should_trigger_copy_ondemand() argument
445 if (!rx->dqo.qpl) in gve_rx_should_trigger_copy_ondemand()
447 if (rx->dqo.used_buf_states_cnt < in gve_rx_should_trigger_copy_ondemand()
448 (rx->dqo.num_buf_states - in gve_rx_should_trigger_copy_ondemand()
454 static int gve_rx_copy_ondemand(struct gve_rx_ring *rx, in gve_rx_copy_ondemand() argument
468 num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; in gve_rx_copy_ondemand()
469 skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page, in gve_rx_copy_ondemand()
472 u64_stats_update_begin(&rx->statss); in gve_rx_copy_ondemand()
473 rx->rx_frag_alloc_cnt++; in gve_rx_copy_ondemand()
474 u64_stats_update_end(&rx->statss); in gve_rx_copy_ondemand()
476 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_rx_copy_ondemand()
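
Copy-on-demand exists for QPL mode: when too many buffer states are pinned under in-flight skbs, the payload is copied into a freshly allocated page so the original QPL buffer can go straight back onto the recycled list. A sketch of that idea; the page_info source-offset fields and the GFP flags are assumptions:

static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
                                struct gve_rx_buf_state_dqo *buf_state,
                                u16 buf_len)
{
        struct page *page = alloc_page(GFP_ATOMIC);
        int num_frags;

        if (!page)
                return -ENOMEM;

        memcpy(page_address(page),
               page_address(buf_state->page_info.page) +
                        buf_state->page_info.page_offset,      /* offset field assumed */
               buf_len);
        num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;
        skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page, 0, buf_len,
                        PAGE_SIZE);

        u64_stats_update_begin(&rx->statss);
        rx->rx_frag_alloc_cnt++;
        u64_stats_update_end(&rx->statss);

        /* The skb now owns the copy; the QPL buffer can be recycled at once. */
        gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
        return 0;
}
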
480 /* Chains multiple skbs for a single rx packet.
485 u16 buf_len, struct gve_rx_ring *rx, in gve_rx_append_frags() argument
488 int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; in gve_rx_append_frags()
497 if (rx->dqo.page_pool) in gve_rx_append_frags()
500 if (rx->ctx.skb_tail == rx->ctx.skb_head) in gve_rx_append_frags()
501 skb_shinfo(rx->ctx.skb_head)->frag_list = skb; in gve_rx_append_frags()
503 rx->ctx.skb_tail->next = skb; in gve_rx_append_frags()
504 rx->ctx.skb_tail = skb; in gve_rx_append_frags()
507 if (rx->ctx.skb_tail != rx->ctx.skb_head) { in gve_rx_append_frags()
508 rx->ctx.skb_head->len += buf_len; in gve_rx_append_frags()
509 rx->ctx.skb_head->data_len += buf_len; in gve_rx_append_frags()
510 rx->ctx.skb_head->truesize += buf_state->page_info.buf_size; in gve_rx_append_frags()
514 if (gve_rx_should_trigger_copy_ondemand(rx)) in gve_rx_append_frags()
515 return gve_rx_copy_ondemand(rx, buf_state, buf_len); in gve_rx_append_frags()
517 skb_add_rx_frag(rx->ctx.skb_tail, num_frags, in gve_rx_append_frags()
521 gve_reuse_buffer(rx, buf_state); in gve_rx_append_frags()
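
When the current tail skb runs out of fragment slots, a fresh zero-length skb is chained in: the first extra skb hangs off skb_head's frag_list and later ones link through ->next, with skb_tail always pointing at the skb currently receiving fragments. A sketch of that branch; the napi_alloc_skb() call with no headroom is an assumption:

        if (unlikely(num_frags == MAX_SKB_FRAGS)) {
                struct sk_buff *skb = napi_alloc_skb(napi, 0);

                if (!skb)
                        return -1;

                if (rx->dqo.page_pool)
                        skb_mark_for_recycle(skb);

                if (rx->ctx.skb_tail == rx->ctx.skb_head)
                        skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
                else
                        rx->ctx.skb_tail->next = skb;
                rx->ctx.skb_tail = skb;
                num_frags = 0;   /* the new tail skb starts empty */
        }
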
529 static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, in gve_rx_dqo() argument
538 struct gve_priv *priv = rx->gve; in gve_rx_dqo()
542 if (unlikely(buffer_id >= rx->dqo.num_buf_states)) { in gve_rx_dqo()
543 net_err_ratelimited("%s: Invalid RX buffer_id=%u\n", in gve_rx_dqo()
547 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_rx_dqo()
548 if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) { in gve_rx_dqo()
549 net_err_ratelimited("%s: RX buffer_id is not allocated: %u\n", in gve_rx_dqo()
555 gve_free_buffer(rx, buf_state); in gve_rx_dqo()
572 rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi, in gve_rx_dqo()
573 rx->dqo.hdr_bufs.data + in gve_rx_dqo()
576 if (unlikely(!rx->ctx.skb_head)) in gve_rx_dqo()
578 rx->ctx.skb_tail = rx->ctx.skb_head; in gve_rx_dqo()
580 if (rx->dqo.page_pool) in gve_rx_dqo()
581 skb_mark_for_recycle(rx->ctx.skb_head); in gve_rx_dqo()
585 u64_stats_update_begin(&rx->statss); in gve_rx_dqo()
586 rx->rx_hsplit_pkt++; in gve_rx_dqo()
587 rx->rx_hsplit_unsplit_pkt += unsplit; in gve_rx_dqo()
588 rx->rx_hsplit_bytes += hdr_len; in gve_rx_dqo()
589 u64_stats_update_end(&rx->statss); in gve_rx_dqo()
598 if (rx->ctx.skb_head) { in gve_rx_dqo()
599 if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx, in gve_rx_dqo()
607 rx->ctx.skb_head = gve_rx_copy(priv->dev, napi, in gve_rx_dqo()
609 if (unlikely(!rx->ctx.skb_head)) in gve_rx_dqo()
611 rx->ctx.skb_tail = rx->ctx.skb_head; in gve_rx_dqo()
613 u64_stats_update_begin(&rx->statss); in gve_rx_dqo()
614 rx->rx_copied_pkt++; in gve_rx_dqo()
615 rx->rx_copybreak_pkt++; in gve_rx_dqo()
616 u64_stats_update_end(&rx->statss); in gve_rx_dqo()
618 gve_free_buffer(rx, buf_state); in gve_rx_dqo()
622 rx->ctx.skb_head = napi_get_frags(napi); in gve_rx_dqo()
623 if (unlikely(!rx->ctx.skb_head)) in gve_rx_dqo()
625 rx->ctx.skb_tail = rx->ctx.skb_head; in gve_rx_dqo()
627 if (gve_rx_should_trigger_copy_ondemand(rx)) { in gve_rx_dqo()
628 if (gve_rx_copy_ondemand(rx, buf_state, buf_len) < 0) in gve_rx_dqo()
633 if (rx->dqo.page_pool) in gve_rx_dqo()
634 skb_mark_for_recycle(rx->ctx.skb_head); in gve_rx_dqo()
636 skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page, in gve_rx_dqo()
639 gve_reuse_buffer(rx, buf_state); in gve_rx_dqo()
643 gve_free_buffer(rx, buf_state); in gve_rx_dqo()
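
Taken in order, the fragments above sketch four outcomes per completion: drop and free the buffer (invalid buffer_id, unallocated state, or an error flag), copy the split-off header out of the per-slot header buffer into a new skb, copy a small packet wholesale (copybreak), or attach the page zero-copy as a frag of a napi_get_frags() skb, appending to an existing skb_head when the packet spans several buffers. The copybreak branch, filled out as a hedged sketch (eop is assumed to be the descriptor's end-of-packet flag and the gve_rx_copy() argument list is inferred from the visible call prefix):

        if (eop && buf_len <= priv->rx_copybreak) {
                rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
                                               &buf_state->page_info, buf_len);
                if (unlikely(!rx->ctx.skb_head))
                        goto error;      /* label name assumed */
                rx->ctx.skb_tail = rx->ctx.skb_head;

                u64_stats_update_begin(&rx->statss);
                rx->rx_copied_pkt++;
                rx->rx_copybreak_pkt++;
                u64_stats_update_end(&rx->statss);

                /* The data was copied, so the DMA buffer is returned now. */
                gve_free_buffer(rx, buf_state);
                return 0;
        }
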
673 static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi, in gve_rx_complete_skb() argument
678 rx->gve->ptype_lut_dqo->ptypes[desc->packet_type]; in gve_rx_complete_skb()
681 skb_record_rx_queue(rx->ctx.skb_head, rx->q_num); in gve_rx_complete_skb()
684 gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
687 gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
693 err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
698 if (skb_headlen(rx->ctx.skb_head) == 0) in gve_rx_complete_skb()
701 napi_gro_receive(napi, rx->ctx.skb_head); in gve_rx_complete_skb()
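
Once the metadata is filled in, delivery depends on how the skb was built: one taken from napi_get_frags() has no linear data and must be completed with napi_gro_frags(), which consumes napi->skb, while an skb with a linear header goes through napi_gro_receive(). That is what the skb_headlen() test above selects:

        if (skb_headlen(rx->ctx.skb_head) == 0)
                napi_gro_frags(napi);
        else
                napi_gro_receive(napi, rx->ctx.skb_head);

        return 0;
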
711 struct gve_rx_ring *rx = block->rx; in gve_rx_poll_dqo() local
712 struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; in gve_rx_poll_dqo()
734 err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num); in gve_rx_poll_dqo()
736 gve_rx_free_skb(napi, rx); in gve_rx_poll_dqo()
737 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
739 rx->rx_skb_alloc_fail++; in gve_rx_poll_dqo()
741 rx->rx_desc_err_dropped_pkt++; in gve_rx_poll_dqo()
742 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
755 struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; in gve_rx_poll_dqo()
761 rx->cnt++; in gve_rx_poll_dqo()
763 if (!rx->ctx.skb_head) in gve_rx_poll_dqo()
770 pkt_bytes = rx->ctx.skb_head->len; in gve_rx_poll_dqo()
774 if (skb_headlen(rx->ctx.skb_head)) in gve_rx_poll_dqo()
778 if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) { in gve_rx_poll_dqo()
779 gve_rx_free_skb(napi, rx); in gve_rx_poll_dqo()
780 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
781 rx->rx_desc_err_dropped_pkt++; in gve_rx_poll_dqo()
782 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
787 rx->ctx.skb_head = NULL; in gve_rx_poll_dqo()
788 rx->ctx.skb_tail = NULL; in gve_rx_poll_dqo()
791 gve_rx_post_buffers_dqo(rx); in gve_rx_poll_dqo()
793 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
794 rx->rpackets += work_done; in gve_rx_poll_dqo()
795 rx->rbytes += bytes; in gve_rx_poll_dqo()
796 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
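
The poll loop walks the completion ring under a generation bit: a slot is new only while its generation bit differs from complq->cur_gen_bit, and software flips cur_gen_bit each time head wraps to 0, which lines up with the ring being memset to zero and cur_gen_bit starting at 0. A sketch of that walk; the generation field name, the dma_rmb() placement, and the per-error stat updates are assumptions:

        while (work_done < budget) {
                /* Stop when the slot still carries the stale generation. */
                if (complq->desc_ring[complq->head].generation ==
                    complq->cur_gen_bit)
                        break;

                dma_rmb();      /* own the descriptor before reading its payload */

                err = gve_rx_dqo(napi, rx, &complq->desc_ring[complq->head],
                                 complq->head, rx->q_num);
                if (err < 0)
                        gve_rx_free_skb(napi, rx);      /* drop the partial skb */

                complq->head = (complq->head + 1) & complq->mask;
                complq->num_free_slots++;
                complq->cur_gen_bit ^= (complq->head == 0);     /* flip once per wrap */
                work_done++;
        }
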