Lines matching refs:pfvf: every reference to the per-interface driver context pointer 'pfvf' (the PF/VF private struct otx2_nic) in the Marvell OcteonTX2 NIC TX/RX datapath, otx2_txrx.c. Each hit shows the source line number, the code, and the enclosing function; the trailing "argument" and "local" tags mark hits in a function's parameter list or local declarations.
32 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
49 static int otx2_nix_cq_op_status(struct otx2_nic *pfvf, in otx2_nix_cq_op_status() argument
55 status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr); in otx2_nix_cq_op_status()
59 dev_err(pfvf->dev, "CQ stopped due to error"); in otx2_nix_cq_op_status()
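The three hits above are the completion-queue status poll: a single atomic fetch-and-add against the NIX_LF_CQ_OP_STATUS mapping (pfvf->cq_op_addr) both selects a CQ, by placing its index in the upper 32 bits of the operand, and returns that CQ's status word in one transaction. A condensed sketch of the surrounding function, reconstructed from these fragments; the error-bit macros and the head/tail field layout are taken from the driver's headers and are assumptions here:

    static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
                                     struct otx2_cq_queue *cq)
    {
        u64 incr = (u64)(cq->cq_idx) << 32;    /* CQ selector in bits 63:32 */
        u64 status;

        /* One atomic op: hardware latches the CQ index, returns its status */
        status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);

        if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
                     status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
            dev_err(pfvf->dev, "CQ stopped due to error");
            return -EINVAL;
        }

        /* Head/tail are packed into the status word; derive pending CQEs */
        cq->cq_tail = status & 0xFFFFF;
        cq->cq_head = (status >> 20) & 0xFFFFF;
        if (cq->cq_tail < cq->cq_head)
            cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) + cq->cq_tail;
        else
            cq->pend_cqe = cq->cq_tail - cq->cq_head;

        return 0;
    }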
97 static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf, in otx2_xdp_snd_pkt_handler() argument
108 pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]); in otx2_xdp_snd_pkt_handler()
109 otx2_dma_unmap_page(pfvf, sg->dma_addr[0], in otx2_xdp_snd_pkt_handler()
115 static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, in otx2_snd_pkt_handler() argument
128 if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf)) in otx2_snd_pkt_handler()
130 pfvf->netdev->name, cq->cint_idx, in otx2_snd_pkt_handler()
141 timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp); in otx2_snd_pkt_handler()
142 err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); in otx2_snd_pkt_handler()
153 otx2_dma_unmap_skb_frags(pfvf, sg); in otx2_snd_pkt_handler()
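otx2_snd_pkt_handler() shows the TX-completion shape: a ratelimited error log gated on netif_msg_tx_err(), an optional PTP path that converts the raw hardware counter (pfvf->ptp->convert_tx_ptp_tstmp) to wall-clock time (otx2_ptp_tstamp2time) before reporting it with skb_tstamp_tx(), then DMA unmap and skb free. A simplified per-CQE sketch; where the raw timestamp is fetched from, and the sg/skb bookkeeping, are assumptions:

    /* sketch: body of otx2_snd_pkt_handler(), locals elided */
    if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
        net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
                            pfvf->netdev->name, cq->cint_idx,
                            snd_comp->status);

    sg = &sq->sg[snd_comp->sqe_id];
    skb = (struct sk_buff *)sg->skb;
    if (unlikely(!skb))
        return;

    if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
        /* Raw timestamp is read from a per-SQE ring (location assumed) */
        timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
        if (!otx2_ptp_tstamp2time(pfvf, timestamp, &tsns)) {
            struct skb_shared_hwtstamps ts;

            memset(&ts, 0, sizeof(ts));
            ts.hwtstamp = ns_to_ktime(tsns);
            skb_tstamp_tx(skb, &ts);
        }
    }

    otx2_dma_unmap_skb_frags(pfvf, sg);
    napi_consume_skb(skb, budget);
    sg->skb = (u64)NULL;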
158 static void otx2_set_rxtstamp(struct otx2_nic *pfvf, in otx2_set_rxtstamp() argument
164 if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)) in otx2_set_rxtstamp()
167 timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data); in otx2_set_rxtstamp()
169 err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); in otx2_set_rxtstamp()
176 static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, in otx2_skb_add_frag() argument
184 va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova)); in otx2_skb_add_frag()
193 otx2_set_rxtstamp(pfvf, skb, va); in otx2_skb_add_frag()
202 len - off, pfvf->rbsize); in otx2_skb_add_frag()
209 pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL); in otx2_skb_add_frag()
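otx2_skb_add_frag() turns one receive segment into an skb page fragment: the IOVA is translated back to a kernel virtual address through the IOMMU domain, the first buffer may begin with an 8-byte hardware RX timestamp (consumed by otx2_set_rxtstamp()), and the fragment's truesize is capped at the pool buffer size pfvf->rbsize. If the skb is already full, the buffer goes straight back to the hardware aura. A trimmed sketch under those assumptions:

    /* sketch: body of otx2_skb_add_frag(), locals elided */
    va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));

    if (likely(!skb_shinfo(skb)->nr_frags) && parse->laptr) {
        /* Data may be preceded by an 8-byte hardware timestamp */
        otx2_set_rxtstamp(pfvf, skb, va);
        off = OTX2_HW_TIMESTAMP_LEN;
    }

    if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
        page = virt_to_page(va);
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                        va - page_address(page) + off,
                        len - off, pfvf->rbsize);
        return true;
    }

    /* skb is full: recycle the buffer to the aura; the low bits of the
     * IOVA are alignment bits and must be cleared first.
     */
    pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
    return false;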
214 static void otx2_set_rxhash(struct otx2_nic *pfvf, in otx2_set_rxhash() argument
221 if (!(pfvf->netdev->features & NETIF_F_RXHASH)) in otx2_set_rxhash()
224 rss = &pfvf->hw.rss_info; in otx2_set_rxhash()
236 static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, in otx2_free_rcv_seg() argument
250 pfvf->hw_ops->aura_freeptr(pfvf, qidx, in otx2_free_rcv_seg()
256 static bool otx2_check_rcv_errors(struct otx2_nic *pfvf, in otx2_check_rcv_errors() argument
259 struct otx2_drv_stats *stats = &pfvf->hw.drv_stats; in otx2_check_rcv_errors()
262 if (netif_msg_rx_err(pfvf)) in otx2_check_rcv_errors()
263 netdev_err(pfvf->netdev, in otx2_check_rcv_errors()
311 if (pfvf->netdev->features & NETIF_F_RXALL) in otx2_check_rcv_errors()
316 otx2_free_rcv_seg(pfvf, cqe, qidx); in otx2_check_rcv_errors()
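The error checker is also where receive policy is applied: log under the rx_err message level, bump the matching counter in pfvf->hw.drv_stats, and then either keep the frame anyway (NETIF_F_RXALL) or drop it, returning every segment of a multi-segment CQE to the pool. The tail of the function, schematically:

    /* sketch: tail of otx2_check_rcv_errors() */
    if (netif_msg_rx_err(pfvf))
        netdev_err(pfvf->netdev,
                   "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
                   qidx, parse->errlev, parse->errcode);

    /* (per-errcode atomic counters in drv_stats are incremented here) */

    if (pfvf->netdev->features & NETIF_F_RXALL)
        return false;                       /* deliver despite the error */

    otx2_free_rcv_seg(pfvf, cqe, qidx);     /* drop: free all segments */
    return true;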
320 static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, in otx2_rcv_pkt_handler() argument
334 if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) in otx2_rcv_pkt_handler()
338 if (pfvf->xdp_prog) in otx2_rcv_pkt_handler()
339 if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush)) in otx2_rcv_pkt_handler()
353 if (otx2_skb_add_frag(pfvf, skb, *seg_addr, in otx2_rcv_pkt_handler()
359 otx2_set_rxhash(pfvf, cqe, skb); in otx2_rcv_pkt_handler()
361 if (!(pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)) { in otx2_rcv_pkt_handler()
363 if (pfvf->netdev->features & NETIF_F_RXCSUM) in otx2_rcv_pkt_handler()
367 if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED) in otx2_rcv_pkt_handler()
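Read top to bottom, otx2_rcv_pkt_handler() is the per-packet RX fast path: error check, an XDP pass that may consume the buffer outright, then skb assembly one segment at a time, RSS hash, and (outside representor mode) checksum and RX-queue recording, plus an optional TC mark. A high-level sketch; the outer descriptor walk and seg_size decoding are abbreviated:

    /* sketch: body of otx2_rcv_pkt_handler(), descriptor walk abbreviated */
    if (unlikely(parse->errlev || parse->errcode))
        if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
            return;                         /* dropped */

    if (pfvf->xdp_prog)
        if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq,
                                     need_xdp_flush))
            return;                         /* XDP_TX/REDIRECT/DROP took it */

    skb = napi_get_frags(napi);

    /* Walk the CQE scatter list; each consumed buffer needs a refill */
    for (seg = 0; seg < sg->segs; seg++, seg_addr++)
        if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
                              seg_size[seg], parse, cq->cq_idx))
            cq->pool_ptrs++;

    otx2_set_rxhash(pfvf, cqe, skb);

    if (!(pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)) {
        skb_record_rx_queue(skb, cq->cq_idx);
        if (pfvf->netdev->features & NETIF_F_RXCSUM)
            skb->ip_summed = CHECKSUM_UNNECESSARY;
    }
    if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED)
        skb->mark = parse->match_id;

    napi_gro_frags(napi);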
375 static int otx2_rx_napi_handler(struct otx2_nic *pfvf, in otx2_rx_napi_handler() argument
386 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_rx_napi_handler()
401 otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush); in otx2_rx_napi_handler()
412 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_rx_napi_handler()
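The RX NAPI loop frames those calls: bail out early when the CQ is errored or empty, handle up to `budget` CQEs, flush any pending XDP redirects once, and finally write NIX_LF_CQ_OP_DOOR with the CQ index in the upper word and the consumed-CQE count in the lower word so hardware can reuse them. A skeleton, with field names assumed where the listing does not show them:

    /* sketch: body of otx2_rx_napi_handler(), locals elided */
    if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
        return 0;

    while (likely(processed_cqe < budget) && cq->pend_cqe) {
        cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
        if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID)
            break;
        cq->cq_head++;
        cq->cq_head &= (cq->cqe_cnt - 1);

        otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);

        cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;   /* mark consumed */
        processed_cqe++;
        cq->pend_cqe--;
    }
    if (need_xdp_flush)
        xdp_do_flush();              /* one flush for all redirected frames */

    /* Return consumed CQEs to hardware */
    otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                 ((u64)cq->cq_idx << 32) | processed_cqe);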
420 struct otx2_nic *pfvf = dev; in otx2_refill_pool_ptrs() local
425 if (otx2_alloc_buffer(pfvf, cq, &bufptr)) in otx2_refill_pool_ptrs()
427 otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM); in otx2_refill_pool_ptrs()
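Refill is the mirror image: for every buffer the RX path consumed (cq->pool_ptrs), allocate a replacement and push its IOVA, offset by OTX2_HEAD_ROOM, back into the aura so the NIX can DMA into it. The loop, nearly verbatim from the fragments:

    /* sketch: body of otx2_refill_pool_ptrs() */
    int cnt = cq->pool_ptrs;

    while (cq->pool_ptrs) {
        if (otx2_alloc_buffer(pfvf, cq, &bufptr))
            break;                   /* out of memory: retry via delayed work */
        otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
        cq->pool_ptrs--;
    }

    return cnt - cq->pool_ptrs;      /* number of buffers actually refilled */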
434 static int otx2_tx_napi_handler(struct otx2_nic *pfvf, in otx2_tx_napi_handler() argument
446 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_tx_napi_handler()
450 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_tx_napi_handler()
451 sq = &pfvf->qset.sq[qidx]; in otx2_tx_napi_handler()
461 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_tx_napi_handler()
464 otx2_xdp_snd_pkt_handler(pfvf, sq, cqe); in otx2_tx_napi_handler()
466 otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx], in otx2_tx_napi_handler()
478 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_tx_napi_handler()
482 if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED) in otx2_tx_napi_handler()
483 ndev = pfvf->reps[qidx]->netdev; in otx2_tx_napi_handler()
486 ndev = pfvf->netdev; in otx2_tx_napi_handler()
491 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_tx_napi_handler()
493 if (qidx >= pfvf->hw.tx_queues) in otx2_tx_napi_handler()
494 qidx -= pfvf->hw.xdp_queues; in otx2_tx_napi_handler()
495 if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED) in otx2_tx_napi_handler()
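On the TX side the CQ index is mapped back to a send queue by subtracting the RX queue count; each CQE then goes to either the XDP or the regular completion handler, the same CQ_OP_DOOR doorbell is rung, and the right netdev TX queue is credited, where "right" depends on representor mode (pfvf->reps[qidx]->netdev versus pfvf->netdev) and on whether the CQ belongs to an XDP queue (qidx shifted back past pfvf->hw.xdp_queues). Condensed, with the stop/wake bookkeeping reduced to its core:

    /* sketch: body of otx2_tx_napi_handler(), stop/wake logic reduced */
    qidx = cq->cq_idx - pfvf->hw.rx_queues;
    sq = &pfvf->qset.sq[qidx];

    while (likely(processed_cqe < budget) && cq->pend_cqe) {
        cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
        if (unlikely(!cqe))
            break;

        if (cq->cq_type == CQ_XDP)
            otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
        else
            otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
                                 cqe, budget, &tx_pkts, &tx_bytes);
        processed_cqe++;
        cq->pend_cqe--;
    }

    otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                 ((u64)cq->cq_idx << 32) | processed_cqe);

    if (likely(tx_pkts)) {
        if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)
            ndev = pfvf->reps[qidx]->netdev;
        else
            ndev = pfvf->netdev;

        qidx = cq->cq_idx - pfvf->hw.rx_queues;
        if (qidx >= pfvf->hw.tx_queues)
            qidx -= pfvf->hw.xdp_queues;    /* XDP CQs shadow real txqs */
        txq = netdev_get_tx_queue(ndev, qidx);
        netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
        /* ...wake txq here if it was stopped and room is available... */
    }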
508 static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll) in otx2_adjust_adaptive_coalese() argument
520 dim_update_sample(pfvf->napi_events, in otx2_adjust_adaptive_coalese()
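Adaptive coalescing delegates to the kernel's DIM (dynamic interrupt moderation) library: the driver folds RX and TX packet/byte totals into a dim_sample stamped with its NAPI event counter, and net_dim() decides whether to switch moderation profiles. A minimal sketch, assuming the driver's stat accessor macros and the by-reference net_dim() signature of recent kernels (the "coalese" spelling is as it appears in the source):

    /* sketch: body of otx2_adjust_adaptive_coalese() */
    struct dim_sample dim_sample = { 0 };
    u64 rx_frames, rx_bytes, tx_frames, tx_bytes;

    rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
                OTX2_GET_RX_STATS(RX_UCAST);
    rx_bytes  = OTX2_GET_RX_STATS(RX_OCTS);
    tx_frames = OTX2_GET_TX_STATS(TX_UCAST);
    tx_bytes  = OTX2_GET_TX_STATS(TX_OCTS);

    dim_update_sample(pfvf->napi_events,
                      rx_frames + tx_frames,
                      rx_bytes + tx_bytes,
                      &dim_sample);
    net_dim(&cq_poll->dim, &dim_sample);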
534 struct otx2_nic *pfvf; in otx2_napi_handler() local
538 pfvf = (struct otx2_nic *)cq_poll->dev; in otx2_napi_handler()
539 qset = &pfvf->qset; in otx2_napi_handler()
548 workdone += otx2_rx_napi_handler(pfvf, napi, in otx2_napi_handler()
551 workdone += otx2_tx_napi_handler(pfvf, cq, budget); in otx2_napi_handler()
556 filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); in otx2_napi_handler()
558 otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); in otx2_napi_handler()
562 if (pfvf->flags & OTX2_FLAG_INTF_DOWN) in otx2_napi_handler()
566 if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) in otx2_napi_handler()
567 otx2_adjust_adaptive_coalese(pfvf, cq_poll); in otx2_napi_handler()
573 work = &pfvf->refill_wrk[cq->cq_idx]; in otx2_napi_handler()
584 otx2_write64(pfvf, in otx2_napi_handler()
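otx2_napi_handler() is the poll callback that ties the pieces together: service each CQ hung off this completion interrupt (RX then TX), refill the RX buffer pool through the hw_ops indirection, and on completion either re-arm the interrupt or, if the refill came up empty, punt to the per-CQ delayed work (pfvf->refill_wrk) so the pool can recover before interrupts resume. A skeleton under those assumptions:

    /* sketch: body of otx2_napi_handler(), locals elided */
    for (i = 0; i < CQS_PER_CINT; i++) {
        cq = &qset->cq[cq_poll->cq_ids[i]];
        if (cq->cq_type == CQ_RX) {
            rx_cq = cq;
            workdone += otx2_rx_napi_handler(pfvf, napi, cq, budget);
        } else {
            workdone += otx2_tx_napi_handler(pfvf, cq, budget);
        }
    }

    if (rx_cq && rx_cq->pool_ptrs)
        filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);

    /* Acknowledge this completion-interrupt line */
    otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));

    if (workdone < budget && napi_complete_done(napi, workdone)) {
        if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
            return workdone;             /* going down: stay masked */

        if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
            otx2_adjust_adaptive_coalese(pfvf, cq_poll);

        if (unlikely(!filled_cnt))
            schedule_delayed_work(&pfvf->refill_wrk[cq->cq_idx].pool_refill_work,
                                  msecs_to_jiffies(100));
        else
            otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
                         BIT_ULL(0));    /* re-enable the interrupt */
    }
    return workdone;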
611 static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sqe_add_sg() argument
637 dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); in otx2_sqe_add_sg()
638 if (dma_mapping_error(pfvf->dev, dma_addr)) in otx2_sqe_add_sg()
656 static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sqe_add_ext() argument
670 ext->lso_format = pfvf->hw.lso_tsov4_idx; in otx2_sqe_add_ext()
679 ext->lso_format = pfvf->hw.lso_tsov6_idx; in otx2_sqe_add_ext()
696 ext->lso_format = pfvf->hw.lso_udpv4_idx; in otx2_sqe_add_ext()
699 ext->lso_format = pfvf->hw.lso_udpv6_idx; in otx2_sqe_add_ext()
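The lso_format hits show how segmentation offload is described to hardware: the extension sub-descriptor carries an index into a table of LSO formats that the driver programmed into the NIX at init time, one each for TCPv4, TCPv6, and UDP segmentation over IPv4/IPv6. A rough sketch of the selection; the header-length fixups and the v4/v6 test for UDP are simplifications:

    /* sketch: LSO selection inside otx2_sqe_add_ext() */
    if (skb_shinfo(skb)->gso_size) {
        ext->lso = 1;
        ext->lso_sb = skb_tcp_all_headers(skb);     /* segmentation boundary */
        ext->lso_mps = skb_shinfo(skb)->gso_size;   /* max payload per seg */

        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
            ext->lso_format = pfvf->hw.lso_tsov4_idx;
        else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
            ext->lso_format = pfvf->hw.lso_tsov6_idx;
        else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
            ext->lso_format = (ip_hdr(skb)->version == 4) ?
                              pfvf->hw.lso_udpv4_idx :
                              pfvf->hw.lso_udpv6_idx;
    }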
747 static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sqe_add_hdr() argument
762 sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ? in otx2_sqe_add_hdr()
763 qidx + pfvf->hw.xdp_queues : qidx; in otx2_sqe_add_hdr()
795 static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf, in otx2_dma_map_tso_skb() argument
813 dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); in otx2_dma_map_tso_skb()
814 if (dma_mapping_error(pfvf->dev, dma_addr)) in otx2_dma_map_tso_skb()
824 otx2_dma_unmap_skb_frags(pfvf, sg); in otx2_dma_map_tso_skb()
877 static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sq_append_tso() argument
880 struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx); in otx2_sq_append_tso()
892 if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) { in otx2_sq_append_tso()
909 otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx); in otx2_sq_append_tso()
958 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); in otx2_sq_append_tso()
962 static bool is_hw_tso_supported(struct otx2_nic *pfvf, in is_hw_tso_supported() argument
967 if (test_bit(HW_TSO, &pfvf->hw.cap_flag)) in is_hw_tso_supported()
971 if (!is_96xx_B0(pfvf->pdev)) in is_hw_tso_supported()
987 static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb) in otx2_get_sqe_count() argument
993 if (is_hw_tso_supported(pfvf, skb)) in otx2_get_sqe_count()
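SQE accounting hinges on whether TSO is done in hardware: a non-GSO frame, or one the hardware can segment, costs a single SQE, while the software fallback (taken when is_hw_tso_supported() fails, e.g. on early 96xx silicon per the is_96xx_B0() check above) costs one SQE per resulting segment. Essentially verbatim from the fragments:

    static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
    {
        if (!skb_shinfo(skb)->gso_size)
            return 1;                         /* plain frame */

        if (is_hw_tso_supported(pfvf, skb))
            return 1;                         /* hardware segments it */

        return skb_shinfo(skb)->gso_segs;     /* software TSO */
    }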
1075 static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, in otx2_set_txtstamp() argument
1089 if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC && in otx2_set_txtstamp()
1094 ts = ns_to_timespec64(pfvf->ptp->tstamp); in otx2_set_txtstamp()
1132 ptp_offset, pfvf->ptp->base_ns, udp_csum_crt); in otx2_set_txtstamp()
1144 struct otx2_nic *pfvf = dev; in otx2_sq_append_skb() local
1154 if (free_desc < otx2_get_sqe_count(pfvf, skb)) in otx2_sq_append_skb()
1171 if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) { in otx2_sq_append_skb()
1178 otx2_sq_append_tso(pfvf, sq, skb, qidx); in otx2_sq_append_skb()
1190 otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx); in otx2_sq_append_skb()
1194 otx2_sqe_add_ext(pfvf, sq, skb, &offset); in otx2_sq_append_skb()
1199 ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset); in otx2_sq_append_skb()
1201 ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset); in otx2_sq_append_skb()
1204 otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]); in otx2_sq_append_skb()
1208 otx2_set_txtstamp(pfvf, skb, sq, &offset); in otx2_sq_append_skb()
1214 return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs, in otx2_sq_append_skb()
1220 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); in otx2_sq_append_skb()
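otx2_sq_append_skb() strings the builders above together. In outline: verify the ring has room for the frame's SQE count, divert to software TSO when hardware cannot segment, then append header, extension, and scatter/gather sub-descriptors (the IPsec variant on CN10K inline-crypto paths), add the TX timestamp descriptor, and flush the whole SQE through hw_ops->sqe_flush(). A condensed outline; the ring arithmetic and the IPsec predicate follow the driver's conventions but are assumptions here:

    /* sketch: body of otx2_sq_append_skb(), locals elided */
    /* Free slots between producer and consumer (ring arithmetic assumed) */
    free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) &
                (sq->sqe_cnt - 1);
    if (free_desc < otx2_get_sqe_count(pfvf, skb))
        return false;                  /* caller will stop the txq */

    if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
        otx2_sq_append_tso(pfvf, sq, skb, qidx);   /* SW TSO path */
        return true;
    }

    sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
    otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
    offset = sizeof(*sqe_hdr);

    otx2_sqe_add_ext(pfvf, sq, skb, &offset);      /* LSO/cksum/etc. */

    if (xfrm_offload(skb))                         /* predicate assumed */
        ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset);
    else
        ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset);
    if (!ret) {
        otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
        return false;                  /* DMA mapping failed */
    }

    otx2_set_txtstamp(pfvf, skb, sq, &offset);
    sqe_hdr->sizem1 = (offset / 16) - 1;           /* SQE size in 16B units */

    if (xfrm_offload(skb))             /* inline IPsec takes over the flush */
        return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs, offset);

    pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
    return true;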
1225 void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx) in otx2_cleanup_rx_cqes() argument
1233 if (pfvf->xdp_prog) in otx2_cleanup_rx_cqes()
1236 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_cleanup_rx_cqes()
1239 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); in otx2_cleanup_rx_cqes()
1240 pool = &pfvf->qset.pool[pool_id]; in otx2_cleanup_rx_cqes()
1250 otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx); in otx2_cleanup_rx_cqes()
1255 otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize); in otx2_cleanup_rx_cqes()
1259 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_cleanup_rx_cqes()
1263 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) in otx2_cleanup_tx_cqes() argument
1274 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_cleanup_tx_cqes()
1275 sq = &pfvf->qset.sq[qidx]; in otx2_cleanup_tx_cqes()
1277 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_cleanup_tx_cqes()
1292 otx2_dma_unmap_skb_frags(pfvf, sg); in otx2_cleanup_tx_cqes()
1299 if (qidx >= pfvf->hw.tx_queues) in otx2_cleanup_tx_cqes()
1300 qidx -= pfvf->hw.xdp_queues; in otx2_cleanup_tx_cqes()
1301 txq = netdev_get_tx_queue(pfvf->netdev, qidx); in otx2_cleanup_tx_cqes()
1305 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_cleanup_tx_cqes()
1309 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable) in otx2_rxtx_enable() argument
1314 mutex_lock(&pfvf->mbox.lock); in otx2_rxtx_enable()
1316 msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox); in otx2_rxtx_enable()
1318 msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox); in otx2_rxtx_enable()
1321 mutex_unlock(&pfvf->mbox.lock); in otx2_rxtx_enable()
1325 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_rxtx_enable()
1326 mutex_unlock(&pfvf->mbox.lock); in otx2_rxtx_enable()
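Datapath enable/disable is a synchronous mailbox exchange with the admin function (AF): under pfvf->mbox.lock, allocate a nix_lf_start_rx or nix_lf_stop_rx request and wait for the AF's response. Reconstructed almost directly from the fragments:

    int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
    {
        struct msg_req *msg;
        int err;

        mutex_lock(&pfvf->mbox.lock);
        if (enable)
            msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
        else
            msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);

        if (!msg) {
            mutex_unlock(&pfvf->mbox.lock);
            return -ENOMEM;
        }

        err = otx2_sync_mbox_msg(&pfvf->mbox);   /* blocks for AF reply */
        mutex_unlock(&pfvf->mbox.lock);
        return err;
    }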
1330 void otx2_free_pending_sqe(struct otx2_nic *pfvf) in otx2_free_pending_sqe() argument
1339 for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) { in otx2_free_pending_sqe()
1340 sq = &pfvf->qset.sq[sq_idx]; in otx2_free_pending_sqe()
1347 otx2_dma_unmap_skb_frags(pfvf, sg); in otx2_free_pending_sqe()
1355 txq = netdev_get_tx_queue(pfvf->netdev, sq_idx); in otx2_free_pending_sqe()
1382 bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) in otx2_xdp_sq_append_pkt() argument
1388 sq = &pfvf->qset.sq[qidx]; in otx2_xdp_sq_append_pkt()
1410 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); in otx2_xdp_sq_append_pkt()
1415 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, in otx2_xdp_rcv_pkt_handler() argument
1430 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); in otx2_xdp_rcv_pkt_handler()
1433 xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq); in otx2_xdp_rcv_pkt_handler()
1445 qidx += pfvf->hw.tx_queues; in otx2_xdp_rcv_pkt_handler()
1447 return otx2_xdp_sq_append_pkt(pfvf, iova, in otx2_xdp_rcv_pkt_handler()
1451 err = xdp_do_redirect(pfvf->netdev, &xdp, prog); in otx2_xdp_rcv_pkt_handler()
1453 otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, in otx2_xdp_rcv_pkt_handler()
1462 bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act); in otx2_xdp_rcv_pkt_handler()
1465 trace_xdp_exception(pfvf->netdev, prog, act); in otx2_xdp_rcv_pkt_handler()
1468 otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, in otx2_xdp_rcv_pkt_handler()
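And the XDP receive path itself: translate the buffer's IOVA, wrap it in an xdp_buff sized to pfvf->rbsize, run the program, and act on the verdict. XDP_TX re-posts the same buffer on an XDP send queue (XDP SQs are numbered after the regular TX queues, hence qidx += pfvf->hw.tx_queues); XDP_REDIRECT defers the flush via *need_xdp_flush so xdp_do_flush() runs once per poll; the abort/drop paths unmap the page and let it be recycled. A condensed sketch of the verdict switch, with buffer-recycle details reduced to comments:

    /* sketch: verdict handling in otx2_xdp_rcv_pkt_handler() */
    pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
    hard_start = (unsigned char *)phys_to_virt(pa);

    xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
    xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
                     cqe->sg.seg_size, false);

    act = bpf_prog_run_xdp(prog, &xdp);

    switch (act) {
    case XDP_PASS:
        return false;                   /* continue to the skb path */
    case XDP_TX:
        qidx += pfvf->hw.tx_queues;     /* XDP SQs follow regular TX SQs */
        return otx2_xdp_sq_append_pkt(pfvf, iova,
                                      cqe->sg.seg_size, qidx);
    case XDP_REDIRECT:
        err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
        if (!err) {
            *need_xdp_flush = true;     /* batched xdp_do_flush() later */
            return true;
        }
        otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
        return true;                    /* buffer recycled to the pool */
    default:
        bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
        fallthrough;
    case XDP_ABORTED:
        trace_xdp_exception(pfvf->netdev, prog, act);
        fallthrough;
    case XDP_DROP:
        otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
        return true;                    /* buffer recycled to the pool */
    }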