Lines Matching +full:region +full:-freeze +full:-timeout +full:-us (drivers/net/ethernet/freescale/gianfar.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * This driver is designed for the non-CPM ethernet controllers
13 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
20 * B-V +1.62
25 * is therefore conveyed through an OF-style device tree.
45 * pre-allocated skb, and so after the skb is passed up to the
93 #include <linux/dma-mapping.h>
114 bdp->bufPtr = cpu_to_be32(buf); in gfar_init_rxbdp()
117 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) in gfar_init_rxbdp()
122 bdp->lstatus = cpu_to_be32(lstatus); in gfar_init_rxbdp()
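The gfar_init_rxbdp() lines above show the ring-wrap convention: every RX buffer descriptor gets its buffer pointer and status, and only the descriptor at rx_bd_base + rx_ring_size - 1 carries the wrap flag so the controller loops back to the base. A minimal plain-C sketch of that convention (struct layout and flag values are illustrative, not the eTSEC's):

```c
#include <stdint.h>
#include <stddef.h>

#define BD_WRAP  0x2000u   /* hypothetical "last descriptor" bit */
#define BD_EMPTY 0x8000u   /* hypothetical "owned by hardware" bit */

struct rx_bd {
	uint16_t status;
	uint16_t length;
	uint32_t bufptr;
};

static void ring_init(struct rx_bd *base, size_t n, const uint32_t *bufs)
{
	for (size_t i = 0; i < n; i++) {
		base[i].bufptr = bufs[i];
		base[i].length = 0;
		base[i].status = BD_EMPTY;
		if (i == n - 1)              /* last BD wraps to the base */
			base[i].status |= BD_WRAP;
	}
}
```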
127 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_tx_rx_base()
131 baddr = &regs->tbase0; in gfar_init_tx_rx_base()
132 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_init_tx_rx_base()
133 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); in gfar_init_tx_rx_base()
137 baddr = &regs->rbase0; in gfar_init_tx_rx_base()
138 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_tx_rx_base()
139 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); in gfar_init_tx_rx_base()
146 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_rqprm()
150 baddr = &regs->rqprm0; in gfar_init_rqprm()
151 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_rqprm()
152 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size | in gfar_init_rqprm()
161 priv->uses_rxfcb = 0; in gfar_rx_offload_en()
163 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) in gfar_rx_offload_en()
164 priv->uses_rxfcb = 1; in gfar_rx_offload_en()
166 if (priv->hwts_rx_en || priv->rx_filer_enable) in gfar_rx_offload_en()
167 priv->uses_rxfcb = 1; in gfar_rx_offload_en()
172 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_rx_config()
175 if (priv->rx_filer_enable) { in gfar_mac_rx_config()
178 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0); in gfar_mac_rx_config()
182 if (priv->ndev->flags & IFF_PROMISC) in gfar_mac_rx_config()
185 if (priv->ndev->features & NETIF_F_RXCSUM) in gfar_mac_rx_config()
188 if (priv->extended_hash) in gfar_mac_rx_config()
191 if (priv->padding) { in gfar_mac_rx_config()
193 rctrl |= RCTRL_PADDING(priv->padding); in gfar_mac_rx_config()
197 if (priv->hwts_rx_en) in gfar_mac_rx_config()
200 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) in gfar_mac_rx_config()
204 gfar_write(&regs->rctrl, rctrl); in gfar_mac_rx_config()
207 gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL); in gfar_mac_rx_config()
211 gfar_write(&regs->rctrl, rctrl); in gfar_mac_rx_config()
216 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_tx_config()
219 if (priv->ndev->features & NETIF_F_IP_CSUM) in gfar_mac_tx_config()
222 if (priv->prio_sched_en) in gfar_mac_tx_config()
226 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT); in gfar_mac_tx_config()
227 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT); in gfar_mac_tx_config()
230 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) in gfar_mac_tx_config()
233 gfar_write(&regs->tctrl, tctrl); in gfar_mac_tx_config()
239 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_configure_coalescing()
242 if (priv->mode == MQ_MG_MODE) { in gfar_configure_coalescing()
245 baddr = &regs->txic0; in gfar_configure_coalescing()
246 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { in gfar_configure_coalescing()
248 if (likely(priv->tx_queue[i]->txcoalescing)) in gfar_configure_coalescing()
249 gfar_write(baddr + i, priv->tx_queue[i]->txic); in gfar_configure_coalescing()
252 baddr = &regs->rxic0; in gfar_configure_coalescing()
253 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { in gfar_configure_coalescing()
255 if (likely(priv->rx_queue[i]->rxcoalescing)) in gfar_configure_coalescing()
256 gfar_write(baddr + i, priv->rx_queue[i]->rxic); in gfar_configure_coalescing()
259 /* Backward compatible case -- even if we enable in gfar_configure_coalescing()
262 gfar_write(&regs->txic, 0); in gfar_configure_coalescing()
263 if (likely(priv->tx_queue[0]->txcoalescing)) in gfar_configure_coalescing()
264 gfar_write(&regs->txic, priv->tx_queue[0]->txic); in gfar_configure_coalescing()
266 gfar_write(&regs->rxic, 0); in gfar_configure_coalescing()
267 if (unlikely(priv->rx_queue[0]->rxcoalescing)) in gfar_configure_coalescing()
268 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); in gfar_configure_coalescing()
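gfar_configure_coalescing() writes per-queue txic/rxic values; in this driver such a value packs an enable bit, a frame-count threshold, and a timer threshold into one register, so the interrupt fires after N frames or T ticks, whichever comes first. A sketch of the packing (field positions are illustrative, not the eTSEC's actual layout):

```c
#include <stdint.h>

#define IC_EN       (1u << 31)                        /* coalescing enable */
#define IC_COUNT(x) (((uint32_t)(x) & 0xff) << 21)    /* frame threshold */
#define IC_TIME(x)  ((uint32_t)(x) & 0xffff)          /* timer threshold */

static inline uint32_t mk_ic(unsigned int count, unsigned int time)
{
	/* interrupt after `count` frames or `time` ticks, whichever first */
	return IC_EN | IC_COUNT(count) | IC_TIME(time);
}
```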
282 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_get_stats64()
283 stats->rx_packets += priv->rx_queue[i]->stats.rx_packets; in gfar_get_stats64()
284 stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes; in gfar_get_stats64()
285 stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped; in gfar_get_stats64()
288 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_get_stats64()
289 stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes; in gfar_get_stats64()
290 stats->tx_packets += priv->tx_queue[i]->stats.tx_packets; in gfar_get_stats64()
293 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { in gfar_get_stats64()
294 struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon; in gfar_get_stats64()
299 spin_lock_irqsave(&priv->rmon_overflow.lock, flags); in gfar_get_stats64()
300 car = gfar_read(&rmon->car1) & CAR1_C1RDR; in gfar_get_stats64()
303 rdrp = gfar_read(&rmon->rdrp); in gfar_get_stats64()
304 car = gfar_read(&rmon->car1) & CAR1_C1RDR; in gfar_get_stats64()
307 priv->rmon_overflow.rdrp++; in gfar_get_stats64()
308 gfar_write(&rmon->car1, car); in gfar_get_stats64()
310 rdrp_offset = priv->rmon_overflow.rdrp; in gfar_get_stats64()
311 spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags); in gfar_get_stats64()
313 stats->rx_missed_errors = rdrp + (rdrp_offset << 16); in gfar_get_stats64()
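The RDRP handling in gfar_get_stats64() extends a 16-bit hardware drop counter in software: the carry bit in CAR1 says the counter wrapped since the last read, so a 64-bit overflow count is bumped (and the carry written back to clear it) before the live register value is added on top. In plain C:

```c
#include <stdint.h>

struct counter_state {
	uint64_t overflows;   /* software carry accumulator */
};

static uint64_t read_extended(struct counter_state *st,
			      uint16_t hw_count, int carry_set)
{
	if (carry_set)        /* hardware wrapped since the last read */
		st->overflows++;
	/* each overflow represents 1 << 16 dropped frames */
	return hw_count + (st->overflows << 16);
}
```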
322 * 2) Use the 8 most significant bits as a hash into a 256-entry
323 * table. The table is controlled through 8 32-bit registers:
324 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
336 int width = priv->hash_width; in gfar_set_hash_for_addr()
337 u8 whichbit = (result >> (32 - width)) & 0x1f; in gfar_set_hash_for_addr()
338 u8 whichreg = result >> (32 - width + 5); in gfar_set_hash_for_addr()
339 u32 value = (1 << (31-whichbit)); in gfar_set_hash_for_addr()
341 tempval = gfar_read(priv->hash_regs[whichreg]); in gfar_set_hash_for_addr()
343 gfar_write(priv->hash_regs[whichreg], tempval); in gfar_set_hash_for_addr()
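gfar_set_hash_for_addr() takes the top hash_width bits of the address CRC as an index into the gaddr/igaddr bank: the upper bits of the index select one of the 32-bit registers and the low five bits select a bit within it, numbered from the MSB down. A plain-C sketch of that selection:

```c
#include <stdint.h>

static void hash_index(uint32_t crc, int width,
		       unsigned int *reg, uint32_t *bit)
{
	uint32_t idx = crc >> (32 - width);   /* top `width` bits of the CRC */

	*reg = idx >> 5;                      /* which 32-bit register */
	*bit = 1u << (31 - (idx & 0x1f));     /* which bit, MSB first */
}
```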
353 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_set_mac_for_addr()
355 u32 __iomem *macptr = &regs->macstnaddr1; in gfar_set_mac_for_addr()
381 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); in gfar_set_mac_addr()
389 for (i = 0; i < priv->num_grps; i++) { in gfar_ints_disable()
390 struct gfar __iomem *regs = priv->gfargrp[i].regs; in gfar_ints_disable()
392 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); in gfar_ints_disable()
395 gfar_write(&regs->imask, IMASK_INIT_CLEAR); in gfar_ints_disable()
402 for (i = 0; i < priv->num_grps; i++) { in gfar_ints_enable()
403 struct gfar __iomem *regs = priv->gfargrp[i].regs; in gfar_ints_enable()
405 gfar_write(&regs->imask, in gfar_ints_enable()
406 IMASK_DEFAULT | priv->rmon_overflow.imask); in gfar_ints_enable()
414 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_tx_queues()
415 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), in gfar_alloc_tx_queues()
417 if (!priv->tx_queue[i]) in gfar_alloc_tx_queues()
418 return -ENOMEM; in gfar_alloc_tx_queues()
420 priv->tx_queue[i]->tx_skbuff = NULL; in gfar_alloc_tx_queues()
421 priv->tx_queue[i]->qindex = i; in gfar_alloc_tx_queues()
422 priv->tx_queue[i]->dev = priv->ndev; in gfar_alloc_tx_queues()
423 spin_lock_init(&(priv->tx_queue[i]->txlock)); in gfar_alloc_tx_queues()
432 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_rx_queues()
433 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), in gfar_alloc_rx_queues()
435 if (!priv->rx_queue[i]) in gfar_alloc_rx_queues()
436 return -ENOMEM; in gfar_alloc_rx_queues()
438 priv->rx_queue[i]->qindex = i; in gfar_alloc_rx_queues()
439 priv->rx_queue[i]->ndev = priv->ndev; in gfar_alloc_rx_queues()
448 for (i = 0; i < priv->num_tx_queues; i++) in gfar_free_tx_queues()
449 kfree(priv->tx_queue[i]); in gfar_free_tx_queues()
456 for (i = 0; i < priv->num_rx_queues; i++) in gfar_free_rx_queues()
457 kfree(priv->rx_queue[i]); in gfar_free_rx_queues()
465 if (priv->gfargrp[i].regs) in unmap_group_regs()
466 iounmap(priv->gfargrp[i].regs); in unmap_group_regs()
473 for (i = 0; i < priv->num_grps; i++) in free_gfar_dev()
475 kfree(priv->gfargrp[i].irqinfo[j]); in free_gfar_dev()
476 priv->gfargrp[i].irqinfo[j] = NULL; in free_gfar_dev()
479 free_netdev(priv->ndev); in free_gfar_dev()
486 for (i = 0; i < priv->num_grps; i++) { in disable_napi()
487 napi_disable(&priv->gfargrp[i].napi_rx); in disable_napi()
488 napi_disable(&priv->gfargrp[i].napi_tx); in disable_napi()
496 for (i = 0; i < priv->num_grps; i++) { in enable_napi()
497 napi_enable(&priv->gfargrp[i].napi_rx); in enable_napi()
498 napi_enable(&priv->gfargrp[i].napi_tx); in enable_napi()
505 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; in gfar_parse_group()
509 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), in gfar_parse_group()
511 if (!grp->irqinfo[i]) in gfar_parse_group()
512 return -ENOMEM; in gfar_parse_group()
515 grp->regs = of_iomap(np, 0); in gfar_parse_group()
516 if (!grp->regs) in gfar_parse_group()
517 return -ENOMEM; in gfar_parse_group()
519 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0); in gfar_parse_group()
523 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); in gfar_parse_group()
524 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); in gfar_parse_group()
525 if (!gfar_irq(grp, TX)->irq || in gfar_parse_group()
526 !gfar_irq(grp, RX)->irq || in gfar_parse_group()
527 !gfar_irq(grp, ER)->irq) in gfar_parse_group()
528 return -EINVAL; in gfar_parse_group()
531 grp->priv = priv; in gfar_parse_group()
532 spin_lock_init(&grp->grplock); in gfar_parse_group()
533 if (priv->mode == MQ_MG_MODE) { in gfar_parse_group()
535 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); in gfar_parse_group()
536 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); in gfar_parse_group()
538 grp->rx_bit_map = 0xFF; in gfar_parse_group()
539 grp->tx_bit_map = 0xFF; in gfar_parse_group()
545 grp->rx_bit_map = bitrev8(grp->rx_bit_map); in gfar_parse_group()
546 grp->tx_bit_map = bitrev8(grp->tx_bit_map); in gfar_parse_group()
551 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { in gfar_parse_group()
552 if (!grp->rx_queue) in gfar_parse_group()
553 grp->rx_queue = priv->rx_queue[i]; in gfar_parse_group()
554 grp->num_rx_queues++; in gfar_parse_group()
555 grp->rstat |= (RSTAT_CLEAR_RHALT >> i); in gfar_parse_group()
556 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); in gfar_parse_group()
557 priv->rx_queue[i]->grp = grp; in gfar_parse_group()
560 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { in gfar_parse_group()
561 if (!grp->tx_queue) in gfar_parse_group()
562 grp->tx_queue = priv->tx_queue[i]; in gfar_parse_group()
563 grp->num_tx_queues++; in gfar_parse_group()
564 grp->tstat |= (TSTAT_CLEAR_THALT >> i); in gfar_parse_group()
565 priv->tqueue |= (TQUEUE_EN0 >> i); in gfar_parse_group()
566 priv->tx_queue[i]->grp = grp; in gfar_parse_group()
569 priv->num_grps++; in gfar_parse_group()
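gfar_parse_group() walks each group's rx/tx queue bitmaps and accumulates per-group status masks; because the eTSEC numbers queues from the most significant bit down, queue i's flag is the queue-0 flag shifted right by i (which is also why the bitmaps are passed through bitrev8() first). A sketch of that accumulation, with a hypothetical flag value:

```c
#include <stdint.h>

#define CLEAR_HALT0 0x80000000u  /* queue 0's bit; queue i is this >> i */

static uint32_t group_halt_mask(unsigned long qmask, unsigned int nqueues)
{
	uint32_t mask = 0;

	for (unsigned int i = 0; i < nqueues; i++)
		if (qmask & (1ul << i))
			mask |= CLEAR_HALT0 >> i;  /* MSB-first numbering */
	return mask;
}
```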
580 if (of_node_name_eq(child, "queue-group")) in gfar_of_group_count()
592 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_get_interface()
595 ecntrl = gfar_read(&regs->ecntrl); in gfar_get_interface()
612 phy_interface_t interface = priv->interface; in gfar_get_interface()
624 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) in gfar_get_interface()
637 struct device_node *np = ofdev->dev.of_node; in gfar_of_init()
645 return -ENODEV; in gfar_of_init()
660 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", in gfar_of_init()
663 return -EINVAL; in gfar_of_init()
674 return -EINVAL; in gfar_of_init()
681 return -EINVAL; in gfar_of_init()
687 return -ENOMEM; in gfar_of_init()
690 priv->ndev = dev; in gfar_of_init()
692 priv->mode = mode; in gfar_of_init()
694 priv->num_tx_queues = num_tx_qs; in gfar_of_init()
696 priv->num_rx_queues = num_rx_qs; in gfar_of_init()
713 INIT_LIST_HEAD(&priv->rx_list.list); in gfar_of_init()
714 priv->rx_list.count = 0; in gfar_of_init()
715 mutex_init(&priv->rx_queue_access); in gfar_of_init()
718 priv->gfargrp[i].regs = NULL; in gfar_of_init()
721 if (priv->mode == MQ_MG_MODE) { in gfar_of_init()
723 if (!of_node_name_eq(child, "queue-group")) in gfar_of_init()
738 if (of_property_read_bool(np, "bd-stash")) { in gfar_of_init()
739 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; in gfar_of_init()
740 priv->bd_stash_en = 1; in gfar_of_init()
743 err = of_property_read_u32(np, "rx-stash-len", &stash_len); in gfar_of_init()
746 priv->rx_stash_size = stash_len; in gfar_of_init()
748 err = of_property_read_u32(np, "rx-stash-idx", &stash_idx); in gfar_of_init()
751 priv->rx_stash_index = stash_idx; in gfar_of_init()
754 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; in gfar_of_init()
757 if (err == -EPROBE_DEFER) in gfar_of_init()
761 dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); in gfar_of_init()
765 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | in gfar_of_init()
771 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | in gfar_of_init()
783 * rgmii-id really needs to be specified. Other types can be in gfar_of_init()
788 priv->interface = interface; in gfar_of_init()
790 priv->interface = gfar_get_interface(dev); in gfar_of_init()
792 if (of_property_read_bool(np, "fsl,magic-packet")) in gfar_of_init()
793 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; in gfar_of_init()
795 if (of_property_read_bool(np, "fsl,wake-on-filer")) in gfar_of_init()
796 priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER; in gfar_of_init()
798 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); in gfar_of_init()
803 if (!priv->phy_node && of_phy_is_fixed_link(np)) { in gfar_of_init()
808 priv->phy_node = of_node_get(np); in gfar_of_init()
812 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); in gfar_of_init()
832 rqfar--; in cluster_entry_per_class()
834 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
835 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
838 rqfar--; in cluster_entry_per_class()
840 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
841 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
844 rqfar--; in cluster_entry_per_class()
847 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
848 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
851 rqfar--; in cluster_entry_per_class()
854 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
855 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
870 priv->ftp_rqfcr[rqfar] = rqfcr; in gfar_init_filer_table()
871 priv->ftp_rqfpr[rqfar] = rqfpr; in gfar_init_filer_table()
881 /* cur_filer_idx indicates the first non-masked rule */ in gfar_init_filer_table()
882 priv->cur_filer_idx = rqfar; in gfar_init_filer_table()
887 priv->ftp_rqfcr[i] = rqfcr; in gfar_init_filer_table()
888 priv->ftp_rqfpr[i] = rqfpr; in gfar_init_filer_table()
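gfar_init_filer_table() and cluster_entry_per_class() fill the parallel ftp_rqfcr/ftp_rqfpr shadow arrays from the highest index downward (rqfar--), leaving cur_filer_idx at the first free slot below the installed rules. A small sketch of that shadow-table discipline, with hypothetical names:

```c
#include <stdint.h>

#define MAX_FILER_IDX 255   /* hardware rule table has 256 entries */

struct filer_shadow {
	uint32_t rqfcr[MAX_FILER_IDX + 1];  /* control: match op, queue */
	uint32_t rqfpr[MAX_FILER_IDX + 1];  /* property: value to match */
};

/* Rules are laid in from the highest index downward, so the slot just
 * below the last rule written is the next free one. The caller must
 * stop before the index underflows past 0. */
static unsigned int push_rule(struct filer_shadow *t, unsigned int idx,
			      uint32_t ctrl, uint32_t prop)
{
	t->rqfcr[idx] = ctrl;
	t->rqfpr[idx] = prop;
	return idx - 1;
}
```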
904 priv->errata |= GFAR_ERRATA_74; in __gfar_detect_errata_83xx()
909 priv->errata |= GFAR_ERRATA_76; in __gfar_detect_errata_83xx()
913 priv->errata |= GFAR_ERRATA_12; in __gfar_detect_errata_83xx()
921 priv->errata |= GFAR_ERRATA_12; in __gfar_detect_errata_85xx()
926 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ in __gfar_detect_errata_85xx()
932 struct device *dev = &priv->ofdev->dev; in gfar_detect_errata()
935 priv->errata |= GFAR_ERRATA_A002; in gfar_detect_errata()
940 else /* non-mpc85xx parts, i.e. e300 core based */ in gfar_detect_errata()
944 if (priv->errata) in gfar_detect_errata()
946 priv->errata); in gfar_detect_errata()
951 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_addr_hash_table()
953 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { in gfar_init_addr_hash_table()
954 priv->extended_hash = 1; in gfar_init_addr_hash_table()
955 priv->hash_width = 9; in gfar_init_addr_hash_table()
957 priv->hash_regs[0] = &regs->igaddr0; in gfar_init_addr_hash_table()
958 priv->hash_regs[1] = &regs->igaddr1; in gfar_init_addr_hash_table()
959 priv->hash_regs[2] = &regs->igaddr2; in gfar_init_addr_hash_table()
960 priv->hash_regs[3] = &regs->igaddr3; in gfar_init_addr_hash_table()
961 priv->hash_regs[4] = &regs->igaddr4; in gfar_init_addr_hash_table()
962 priv->hash_regs[5] = &regs->igaddr5; in gfar_init_addr_hash_table()
963 priv->hash_regs[6] = &regs->igaddr6; in gfar_init_addr_hash_table()
964 priv->hash_regs[7] = &regs->igaddr7; in gfar_init_addr_hash_table()
965 priv->hash_regs[8] = &regs->gaddr0; in gfar_init_addr_hash_table()
966 priv->hash_regs[9] = &regs->gaddr1; in gfar_init_addr_hash_table()
967 priv->hash_regs[10] = &regs->gaddr2; in gfar_init_addr_hash_table()
968 priv->hash_regs[11] = &regs->gaddr3; in gfar_init_addr_hash_table()
969 priv->hash_regs[12] = &regs->gaddr4; in gfar_init_addr_hash_table()
970 priv->hash_regs[13] = &regs->gaddr5; in gfar_init_addr_hash_table()
971 priv->hash_regs[14] = &regs->gaddr6; in gfar_init_addr_hash_table()
972 priv->hash_regs[15] = &regs->gaddr7; in gfar_init_addr_hash_table()
975 priv->extended_hash = 0; in gfar_init_addr_hash_table()
976 priv->hash_width = 8; in gfar_init_addr_hash_table()
978 priv->hash_regs[0] = &regs->gaddr0; in gfar_init_addr_hash_table()
979 priv->hash_regs[1] = &regs->gaddr1; in gfar_init_addr_hash_table()
980 priv->hash_regs[2] = &regs->gaddr2; in gfar_init_addr_hash_table()
981 priv->hash_regs[3] = &regs->gaddr3; in gfar_init_addr_hash_table()
982 priv->hash_regs[4] = &regs->gaddr4; in gfar_init_addr_hash_table()
983 priv->hash_regs[5] = &regs->gaddr5; in gfar_init_addr_hash_table()
984 priv->hash_regs[6] = &regs->gaddr6; in gfar_init_addr_hash_table()
985 priv->hash_regs[7] = &regs->gaddr7; in gfar_init_addr_hash_table()
999 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are in __gfar_is_rx_idle()
1000 * the same as bits 23-30, the eTSEC Rx is assumed to be idle in __gfar_is_rx_idle()
1003 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); in __gfar_is_rx_idle()
1014 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_halt_nodisable()
1016 unsigned int timeout; in gfar_halt_nodisable() local
1025 tempval = gfar_read(&regs->dmactrl); in gfar_halt_nodisable()
1027 gfar_write(&regs->dmactrl, tempval); in gfar_halt_nodisable()
1030 timeout = 1000; in gfar_halt_nodisable()
1031 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) { in gfar_halt_nodisable()
1033 timeout--; in gfar_halt_nodisable()
1036 if (!timeout) in gfar_halt_nodisable()
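gfar_halt_nodisable() requests a graceful stop in DMACTRL and then busy-polls gfar_is_dma_stopped() with a bounded retry count, re-checking one last time after the budget runs out. The generic shape of that pattern, with hypothetical callbacks standing in for the condition check and udelay():

```c
#include <stdbool.h>

static bool poll_until(bool (*cond)(void *), void *arg,
		       void (*delay)(void), unsigned int tries)
{
	while (tries--) {
		if (cond(arg))
			return true;    /* stopped before the timeout */
		delay();                /* give the DMA time to drain */
	}
	return cond(arg);               /* final re-check, as the driver does */
}
```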
1047 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_halt()
1051 gfar_write(&regs->rqueue, 0); in gfar_halt()
1052 gfar_write(&regs->tqueue, 0); in gfar_halt()
1059 tempval = gfar_read(&regs->maccfg1); in gfar_halt()
1061 gfar_write(&regs->maccfg1, tempval); in gfar_halt()
1067 struct gfar_private *priv = netdev_priv(tx_queue->dev); in free_skb_tx_queue()
1070 txbdp = tx_queue->tx_bd_base; in free_skb_tx_queue()
1072 for (i = 0; i < tx_queue->tx_ring_size; i++) { in free_skb_tx_queue()
1073 if (!tx_queue->tx_skbuff[i]) in free_skb_tx_queue()
1076 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr), in free_skb_tx_queue()
1077 be16_to_cpu(txbdp->length), DMA_TO_DEVICE); in free_skb_tx_queue()
1078 txbdp->lstatus = 0; in free_skb_tx_queue()
1079 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; in free_skb_tx_queue()
1082 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr), in free_skb_tx_queue()
1083 be16_to_cpu(txbdp->length), in free_skb_tx_queue()
1087 dev_kfree_skb_any(tx_queue->tx_skbuff[i]); in free_skb_tx_queue()
1088 tx_queue->tx_skbuff[i] = NULL; in free_skb_tx_queue()
1090 kfree(tx_queue->tx_skbuff); in free_skb_tx_queue()
1091 tx_queue->tx_skbuff = NULL; in free_skb_tx_queue()
1098 struct rxbd8 *rxbdp = rx_queue->rx_bd_base; in free_skb_rx_queue()
1100 dev_kfree_skb(rx_queue->skb); in free_skb_rx_queue()
1102 for (i = 0; i < rx_queue->rx_ring_size; i++) { in free_skb_rx_queue()
1103 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; in free_skb_rx_queue()
1105 rxbdp->lstatus = 0; in free_skb_rx_queue()
1106 rxbdp->bufPtr = 0; in free_skb_rx_queue()
1109 if (!rxb->page) in free_skb_rx_queue()
1112 dma_unmap_page(rx_queue->dev, rxb->dma, in free_skb_rx_queue()
1114 __free_page(rxb->page); in free_skb_rx_queue()
1116 rxb->page = NULL; in free_skb_rx_queue()
1119 kfree(rx_queue->rx_buff); in free_skb_rx_queue()
1120 rx_queue->rx_buff = NULL; in free_skb_rx_queue()
1133 for (i = 0; i < priv->num_tx_queues; i++) { in free_skb_resources()
1136 tx_queue = priv->tx_queue[i]; in free_skb_resources()
1137 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); in free_skb_resources()
1138 if (tx_queue->tx_skbuff) in free_skb_resources()
1143 for (i = 0; i < priv->num_rx_queues; i++) { in free_skb_resources()
1144 rx_queue = priv->rx_queue[i]; in free_skb_resources()
1145 if (rx_queue->rx_buff) in free_skb_resources()
1149 dma_free_coherent(priv->dev, in free_skb_resources()
1150 sizeof(struct txbd8) * priv->total_tx_ring_size + in free_skb_resources()
1151 sizeof(struct rxbd8) * priv->total_rx_ring_size, in free_skb_resources()
1152 priv->tx_queue[0]->tx_bd_base, in free_skb_resources()
1153 priv->tx_queue[0]->tx_bd_dma_base); in free_skb_resources()
1163 set_bit(GFAR_DOWN, &priv->state); in stop_gfar()
1171 phy_stop(dev->phydev); in stop_gfar()
1178 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_start()
1183 gfar_write(&regs->rqueue, priv->rqueue); in gfar_start()
1184 gfar_write(&regs->tqueue, priv->tqueue); in gfar_start()
1187 tempval = gfar_read(&regs->dmactrl); in gfar_start()
1189 gfar_write(&regs->dmactrl, tempval); in gfar_start()
1192 tempval = gfar_read(&regs->dmactrl); in gfar_start()
1194 gfar_write(&regs->dmactrl, tempval); in gfar_start()
1196 for (i = 0; i < priv->num_grps; i++) { in gfar_start()
1197 regs = priv->gfargrp[i].regs; in gfar_start()
1199 gfar_write(&regs->tstat, priv->gfargrp[i].tstat); in gfar_start()
1200 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); in gfar_start()
1204 tempval = gfar_read(&regs->maccfg1); in gfar_start()
1206 gfar_write(&regs->maccfg1, tempval); in gfar_start()
1210 netif_trans_update(priv->ndev); /* prevent tx timeout */ in gfar_start()
1222 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in gfar_new_page()
1223 if (unlikely(dma_mapping_error(rxq->dev, addr))) { in gfar_new_page()
1229 rxb->dma = addr; in gfar_new_page()
1230 rxb->page = page; in gfar_new_page()
1231 rxb->page_offset = 0; in gfar_new_page()
1238 struct gfar_private *priv = netdev_priv(rx_queue->ndev); in gfar_rx_alloc_err()
1239 struct gfar_extra_stats *estats = &priv->extra_stats; in gfar_rx_alloc_err()
1241 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); in gfar_rx_alloc_err()
1242 atomic64_inc(&estats->rx_alloc_err); in gfar_rx_alloc_err()
1252 i = rx_queue->next_to_use; in gfar_alloc_rx_buffs()
1253 bdp = &rx_queue->rx_bd_base[i]; in gfar_alloc_rx_buffs()
1254 rxb = &rx_queue->rx_buff[i]; in gfar_alloc_rx_buffs()
1256 while (alloc_cnt--) { in gfar_alloc_rx_buffs()
1258 if (unlikely(!rxb->page)) { in gfar_alloc_rx_buffs()
1267 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); in gfar_alloc_rx_buffs()
1273 if (unlikely(++i == rx_queue->rx_ring_size)) { in gfar_alloc_rx_buffs()
1275 bdp = rx_queue->rx_bd_base; in gfar_alloc_rx_buffs()
1276 rxb = rx_queue->rx_buff; in gfar_alloc_rx_buffs()
1280 rx_queue->next_to_use = i; in gfar_alloc_rx_buffs()
1281 rx_queue->next_to_alloc = i; in gfar_alloc_rx_buffs()
1287 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_bds()
1294 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_init_bds()
1295 tx_queue = priv->tx_queue[i]; in gfar_init_bds()
1297 tx_queue->num_txbdfree = tx_queue->tx_ring_size; in gfar_init_bds()
1298 tx_queue->dirty_tx = tx_queue->tx_bd_base; in gfar_init_bds()
1299 tx_queue->cur_tx = tx_queue->tx_bd_base; in gfar_init_bds()
1300 tx_queue->skb_curtx = 0; in gfar_init_bds()
1301 tx_queue->skb_dirtytx = 0; in gfar_init_bds()
1304 txbdp = tx_queue->tx_bd_base; in gfar_init_bds()
1305 for (j = 0; j < tx_queue->tx_ring_size; j++) { in gfar_init_bds()
1306 txbdp->lstatus = 0; in gfar_init_bds()
1307 txbdp->bufPtr = 0; in gfar_init_bds()
1312 txbdp--; in gfar_init_bds()
1313 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | in gfar_init_bds()
1317 rfbptr = &regs->rfbptr0; in gfar_init_bds()
1318 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_bds()
1319 rx_queue = priv->rx_queue[i]; in gfar_init_bds()
1321 rx_queue->next_to_clean = 0; in gfar_init_bds()
1322 rx_queue->next_to_use = 0; in gfar_init_bds()
1323 rx_queue->next_to_alloc = 0; in gfar_init_bds()
1330 rx_queue->rfbptr = rfbptr; in gfar_init_bds()
1341 struct device *dev = priv->dev; in gfar_alloc_skb_resources()
1345 priv->total_tx_ring_size = 0; in gfar_alloc_skb_resources()
1346 for (i = 0; i < priv->num_tx_queues; i++) in gfar_alloc_skb_resources()
1347 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; in gfar_alloc_skb_resources()
1349 priv->total_rx_ring_size = 0; in gfar_alloc_skb_resources()
1350 for (i = 0; i < priv->num_rx_queues; i++) in gfar_alloc_skb_resources()
1351 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; in gfar_alloc_skb_resources()
1355 (priv->total_tx_ring_size * in gfar_alloc_skb_resources()
1357 (priv->total_rx_ring_size * in gfar_alloc_skb_resources()
1361 return -ENOMEM; in gfar_alloc_skb_resources()
1363 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_skb_resources()
1364 tx_queue = priv->tx_queue[i]; in gfar_alloc_skb_resources()
1365 tx_queue->tx_bd_base = vaddr; in gfar_alloc_skb_resources()
1366 tx_queue->tx_bd_dma_base = addr; in gfar_alloc_skb_resources()
1367 tx_queue->dev = ndev; in gfar_alloc_skb_resources()
1369 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; in gfar_alloc_skb_resources()
1370 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; in gfar_alloc_skb_resources()
1374 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_skb_resources()
1375 rx_queue = priv->rx_queue[i]; in gfar_alloc_skb_resources()
1376 rx_queue->rx_bd_base = vaddr; in gfar_alloc_skb_resources()
1377 rx_queue->rx_bd_dma_base = addr; in gfar_alloc_skb_resources()
1378 rx_queue->ndev = ndev; in gfar_alloc_skb_resources()
1379 rx_queue->dev = dev; in gfar_alloc_skb_resources()
1380 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; in gfar_alloc_skb_resources()
1381 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; in gfar_alloc_skb_resources()
1385 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_skb_resources()
1386 tx_queue = priv->tx_queue[i]; in gfar_alloc_skb_resources()
1387 tx_queue->tx_skbuff = in gfar_alloc_skb_resources()
1388 kmalloc_array(tx_queue->tx_ring_size, in gfar_alloc_skb_resources()
1389 sizeof(*tx_queue->tx_skbuff), in gfar_alloc_skb_resources()
1391 if (!tx_queue->tx_skbuff) in gfar_alloc_skb_resources()
1394 for (j = 0; j < tx_queue->tx_ring_size; j++) in gfar_alloc_skb_resources()
1395 tx_queue->tx_skbuff[j] = NULL; in gfar_alloc_skb_resources()
1398 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_skb_resources()
1399 rx_queue = priv->rx_queue[i]; in gfar_alloc_skb_resources()
1400 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, in gfar_alloc_skb_resources()
1401 sizeof(*rx_queue->rx_buff), in gfar_alloc_skb_resources()
1403 if (!rx_queue->rx_buff) in gfar_alloc_skb_resources()
1413 return -ENOMEM; in gfar_alloc_skb_resources()
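gfar_alloc_skb_resources() makes a single dma_alloc_coherent() call sized for all TX rings plus all RX rings, then advances a CPU pointer and a DMA address in lockstep to hand each queue its slice. A plain-C sketch of the carving arithmetic:

```c
#include <stddef.h>
#include <stdint.h>

struct ring { void *vaddr; uint64_t dma; };

static void carve(void *vbase, uint64_t dbase,
		  struct ring *rings, const size_t *bytes, int n)
{
	char *v = vbase;
	uint64_t d = dbase;

	for (int i = 0; i < n; i++) {
		rings[i].vaddr = v;   /* CPU view of this ring */
		rings[i].dma   = d;   /* device view, same offset */
		v += bytes[i];
		d += bytes[i];
	}
}
```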
1431 clear_bit(GFAR_DOWN, &priv->state); in startup_gfar()
1438 priv->oldlink = 0; in startup_gfar()
1439 priv->oldspeed = 0; in startup_gfar()
1440 priv->oldduplex = -1; in startup_gfar()
1442 phy_start(ndev->phydev); in startup_gfar()
1453 struct net_device *ndev = priv->ndev; in gfar_get_flowctrl_cfg()
1454 struct phy_device *phydev = ndev->phydev; in gfar_get_flowctrl_cfg()
1457 if (!phydev->duplex) in gfar_get_flowctrl_cfg()
1460 if (!priv->pause_aneg_en) { in gfar_get_flowctrl_cfg()
1461 if (priv->tx_pause_en) in gfar_get_flowctrl_cfg()
1463 if (priv->rx_pause_en) in gfar_get_flowctrl_cfg()
1470 if (phydev->pause) in gfar_get_flowctrl_cfg()
1472 if (phydev->asym_pause) in gfar_get_flowctrl_cfg()
1475 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); in gfar_get_flowctrl_cfg()
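gfar_get_flowctrl_cfg() turns the local and remote pause advertisements into TX/RX flow-control enables. A condensed sketch of the standard 802.3 resolution it relies on (bit names hypothetical; the driver itself goes through the mii helpers for this):

```c
#include <stdint.h>

#define ADV_PAUSE 0x1u   /* symmetric pause advertised */
#define ADV_ASYM  0x2u   /* asymmetric pause advertised */
#define FC_TX     0x1u   /* we send pause frames */
#define FC_RX     0x2u   /* we honor received pause frames */

static uint8_t resolve_pause(uint8_t lcl, uint8_t rmt)
{
	if ((lcl & ADV_PAUSE) && (rmt & ADV_PAUSE))
		return FC_TX | FC_RX;           /* symmetric pause */
	if ((lcl & ADV_ASYM) && (rmt & ADV_ASYM)) {
		if (lcl & ADV_PAUSE)
			return FC_RX;           /* we pause, peer doesn't */
		if (rmt & ADV_PAUSE)
			return FC_TX;           /* peer pauses, we don't */
	}
	return 0;
}
```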
1488 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_update_link_state()
1489 struct net_device *ndev = priv->ndev; in gfar_update_link_state()
1490 struct phy_device *phydev = ndev->phydev; in gfar_update_link_state()
1494 if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) in gfar_update_link_state()
1497 if (phydev->link) { in gfar_update_link_state()
1498 u32 tempval1 = gfar_read(&regs->maccfg1); in gfar_update_link_state()
1499 u32 tempval = gfar_read(&regs->maccfg2); in gfar_update_link_state()
1500 u32 ecntrl = gfar_read(&regs->ecntrl); in gfar_update_link_state()
1503 if (phydev->duplex != priv->oldduplex) { in gfar_update_link_state()
1504 if (!(phydev->duplex)) in gfar_update_link_state()
1509 priv->oldduplex = phydev->duplex; in gfar_update_link_state()
1512 if (phydev->speed != priv->oldspeed) { in gfar_update_link_state()
1513 switch (phydev->speed) { in gfar_update_link_state()
1528 if (phydev->speed == SPEED_100) in gfar_update_link_state()
1534 netif_warn(priv, link, priv->ndev, in gfar_update_link_state()
1536 phydev->speed); in gfar_update_link_state()
1540 priv->oldspeed = phydev->speed; in gfar_update_link_state()
1548 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_update_link_state()
1551 rx_queue = priv->rx_queue[i]; in gfar_update_link_state()
1553 gfar_write(rx_queue->rfbptr, bdp_dma); in gfar_update_link_state()
1556 priv->tx_actual_en = 1; in gfar_update_link_state()
1560 priv->tx_actual_en = 0; in gfar_update_link_state()
1562 gfar_write(&regs->maccfg1, tempval1); in gfar_update_link_state()
1563 gfar_write(&regs->maccfg2, tempval); in gfar_update_link_state()
1564 gfar_write(&regs->ecntrl, ecntrl); in gfar_update_link_state()
1566 if (!priv->oldlink) in gfar_update_link_state()
1567 priv->oldlink = 1; in gfar_update_link_state()
1569 } else if (priv->oldlink) { in gfar_update_link_state()
1570 priv->oldlink = 0; in gfar_update_link_state()
1571 priv->oldspeed = 0; in gfar_update_link_state()
1572 priv->oldduplex = -1; in gfar_update_link_state()
1588 struct phy_device *phydev = dev->phydev; in adjust_link()
1590 if (unlikely(phydev->link != priv->oldlink || in adjust_link()
1591 (phydev->link && (phydev->duplex != priv->oldduplex || in adjust_link()
1592 phydev->speed != priv->oldspeed)))) in adjust_link()
1609 if (!priv->tbi_node) { in gfar_configure_serdes()
1610 dev_warn(&dev->dev, "error: SGMII mode requires that the " in gfar_configure_serdes()
1611 "device tree specify a tbi-handle\n"); in gfar_configure_serdes()
1615 tbiphy = of_phy_find_device(priv->tbi_node); in gfar_configure_serdes()
1617 dev_err(&dev->dev, "error: Could not get TBI device\n"); in gfar_configure_serdes()
1622 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured in gfar_configure_serdes()
1623 * everything for us? Resetting it takes the link down and requires in gfar_configure_serdes()
1627 put_device(&tbiphy->mdio.dev); in gfar_configure_serdes()
1642 put_device(&tbiphy->mdio.dev); in gfar_configure_serdes()
1652 phy_interface_t interface = priv->interface; in init_phy()
1661 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) in init_phy()
1664 priv->oldlink = 0; in init_phy()
1665 priv->oldspeed = 0; in init_phy()
1666 priv->oldduplex = -1; in init_phy()
1668 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, in init_phy()
1671 dev_err(&dev->dev, "could not attach to PHY\n"); in init_phy()
1672 return -ENODEV; in init_phy()
1679 linkmode_and(phydev->supported, phydev->supported, mask); in init_phy()
1680 linkmode_copy(phydev->advertising, phydev->supported); in init_phy()
1705 * payload. We set it to checksum, using a pseudo-header in gfar_tx_checksum()
1713 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { in gfar_tx_checksum()
1715 fcb->phcs = (__force __be16)(udp_hdr(skb)->check); in gfar_tx_checksum()
1717 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); in gfar_tx_checksum()
1720 * frame (skb->data) and the start of the IP hdr. in gfar_tx_checksum()
1724 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length); in gfar_tx_checksum()
1725 fcb->l4os = skb_network_header_len(skb); in gfar_tx_checksum()
1727 fcb->flags = flags; in gfar_tx_checksum()
1732 fcb->flags |= TXFCB_VLN; in gfar_tx_vlan()
1733 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); in gfar_tx_vlan()
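gfar_tx_checksum() and gfar_tx_vlan() fill an 8-byte frame control block prepended to the frame so the controller can insert the L4 checksum and VLAN tag: l3os is the gap between the end of the FCB and the IP header, l4os the IP header length, and phcs the pseudo-header checksum seed. A sketch of that bookkeeping (field order follows the driver's txfcb, widths simplified):

```c
#include <stdint.h>

struct tx_fcb {
	uint8_t  flags;   /* TXFCB_IP/TXFCB_TUP-style indicator bits */
	uint8_t  ptp;     /* timestamp-this-frame flag */
	uint8_t  l4os;    /* L3 header length = offset from L3 to L4 */
	uint8_t  l3os;    /* bytes between end of FCB and start of L3 */
	uint16_t phcs;    /* pseudo-header checksum seed for the MAC */
	uint16_t vlctl;   /* VLAN tag to insert, if flagged */
};

/* mirrors the offset math in gfar_tx_checksum() */
static void fcb_fill_csum(struct tx_fcb *fcb, unsigned int net_off,
			  unsigned int net_hdrlen, unsigned int fcb_len,
			  uint16_t pseudo_csum)
{
	fcb->l3os = (uint8_t)(net_off - fcb_len);
	fcb->l4os = (uint8_t)net_hdrlen;
	fcb->phcs = pseudo_csum;  /* hardware folds in the payload sum */
}
```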
1741 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; in skip_txbd()
1769 * It is reached through the netdev_ops->ndo_start_xmit callback
1786 rq = skb->queue_mapping; in gfar_start_xmit()
1787 tx_queue = priv->tx_queue[rq]; in gfar_start_xmit()
1789 base = tx_queue->tx_bd_base; in gfar_start_xmit()
1790 regs = tx_queue->grp->regs; in gfar_start_xmit()
1792 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); in gfar_start_xmit()
1794 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in gfar_start_xmit()
1795 priv->hwts_tx_en; in gfar_start_xmit()
1807 dev->stats.tx_errors++; in gfar_start_xmit()
1814 nr_frags = skb_shinfo(skb)->nr_frags; in gfar_start_xmit()
1823 if (nr_txbds > tx_queue->num_txbdfree) { in gfar_start_xmit()
1826 dev->stats.tx_fifo_errors++; in gfar_start_xmit()
1831 bytes_sent = skb->len; in gfar_start_xmit()
1832 tx_queue->stats.tx_bytes += bytes_sent; in gfar_start_xmit()
1834 GFAR_CB(skb)->bytes_sent = bytes_sent; in gfar_start_xmit()
1835 tx_queue->stats.tx_packets++; in gfar_start_xmit()
1837 txbdp = txbdp_start = tx_queue->cur_tx; in gfar_start_xmit()
1838 lstatus = be32_to_cpu(txbdp->lstatus); in gfar_start_xmit()
1843 memset(skb->data, 0, GMAC_TXPAL_LEN); in gfar_start_xmit()
1857 unlikely(gfar_csum_errata_76(priv, skb->len))) { in gfar_start_xmit()
1874 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), in gfar_start_xmit()
1876 if (unlikely(dma_mapping_error(priv->dev, bufaddr))) in gfar_start_xmit()
1879 txbdp_start->bufPtr = cpu_to_be32(bufaddr); in gfar_start_xmit()
1884 tx_queue->tx_ring_size); in gfar_start_xmit()
1893 frag = &skb_shinfo(skb)->frags[0]; in gfar_start_xmit()
1898 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1902 lstatus = be32_to_cpu(txbdp->lstatus) | size | in gfar_start_xmit()
1906 if (i == nr_frags - 1) in gfar_start_xmit()
1909 bufaddr = skb_frag_dma_map(priv->dev, frag, 0, in gfar_start_xmit()
1911 if (unlikely(dma_mapping_error(priv->dev, bufaddr))) in gfar_start_xmit()
1915 txbdp->bufPtr = cpu_to_be32(bufaddr); in gfar_start_xmit()
1916 txbdp->lstatus = cpu_to_be32(lstatus); in gfar_start_xmit()
1928 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); in gfar_start_xmit()
1930 bufaddr = be32_to_cpu(txbdp_start->bufPtr); in gfar_start_xmit()
1934 (skb_headlen(skb) - fcb_len); in gfar_start_xmit()
1938 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); in gfar_start_xmit()
1939 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); in gfar_start_xmit()
1943 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in gfar_start_xmit()
1944 fcb->ptp = 1; in gfar_start_xmit()
1954 txbdp_start->lstatus = cpu_to_be32(lstatus); in gfar_start_xmit()
1958 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; in gfar_start_xmit()
1963 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & in gfar_start_xmit()
1964 TX_RING_MOD_MASK(tx_queue->tx_ring_size); in gfar_start_xmit()
1966 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1973 spin_lock_bh(&tx_queue->txlock); in gfar_start_xmit()
1975 tx_queue->num_txbdfree -= (nr_txbds); in gfar_start_xmit()
1976 spin_unlock_bh(&tx_queue->txlock); in gfar_start_xmit()
1979 * are full. We need to tell the kernel to stop sending us stuff. in gfar_start_xmit()
1981 if (!tx_queue->num_txbdfree) { in gfar_start_xmit()
1984 dev->stats.tx_fifo_errors++; in gfar_start_xmit()
1988 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); in gfar_start_xmit()
1993 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1995 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1997 lstatus = be32_to_cpu(txbdp->lstatus); in gfar_start_xmit()
2002 txbdp->lstatus = cpu_to_be32(lstatus); in gfar_start_xmit()
2003 bufaddr = be32_to_cpu(txbdp->bufPtr); in gfar_start_xmit()
2004 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length), in gfar_start_xmit()
2006 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
2016 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); in gfar_set_mac_address()
2025 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) in gfar_change_mtu()
2028 if (dev->flags & IFF_UP) in gfar_change_mtu()
2031 WRITE_ONCE(dev->mtu, new_mtu); in gfar_change_mtu()
2033 if (dev->flags & IFF_UP) in gfar_change_mtu()
2036 clear_bit_unlock(GFAR_RESETTING, &priv->state); in gfar_change_mtu()
2045 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) in reset_gfar()
2051 clear_bit_unlock(GFAR_RESETTING, &priv->state); in reset_gfar()
2063 reset_gfar(priv->ndev); in gfar_reset_task()
2070 dev->stats.tx_errors++; in gfar_timeout()
2071 schedule_work(&priv->reset_task); in gfar_timeout()
2079 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) in gfar_hwtstamp_set()
2080 return -EFAULT; in gfar_hwtstamp_set()
2084 priv->hwts_tx_en = 0; in gfar_hwtstamp_set()
2087 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) in gfar_hwtstamp_set()
2088 return -ERANGE; in gfar_hwtstamp_set()
2089 priv->hwts_tx_en = 1; in gfar_hwtstamp_set()
2092 return -ERANGE; in gfar_hwtstamp_set()
2097 if (priv->hwts_rx_en) { in gfar_hwtstamp_set()
2098 priv->hwts_rx_en = 0; in gfar_hwtstamp_set()
2103 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) in gfar_hwtstamp_set()
2104 return -ERANGE; in gfar_hwtstamp_set()
2105 if (!priv->hwts_rx_en) { in gfar_hwtstamp_set()
2106 priv->hwts_rx_en = 1; in gfar_hwtstamp_set()
2113 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? in gfar_hwtstamp_set()
2114 -EFAULT : 0; in gfar_hwtstamp_set()
2123 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; in gfar_hwtstamp_get()
2124 config.rx_filter = (priv->hwts_rx_en ? in gfar_hwtstamp_get()
2127 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? in gfar_hwtstamp_get()
2128 -EFAULT : 0; in gfar_hwtstamp_get()
2133 struct phy_device *phydev = dev->phydev; in gfar_ioctl()
2136 return -EINVAL; in gfar_ioctl()
2144 return -ENODEV; in gfar_ioctl()
2152 struct net_device *dev = tx_queue->dev; in gfar_clean_tx_ring()
2157 struct txbd8 *base = tx_queue->tx_bd_base; in gfar_clean_tx_ring()
2160 int tx_ring_size = tx_queue->tx_ring_size; in gfar_clean_tx_ring()
2164 int tqi = tx_queue->qindex; in gfar_clean_tx_ring()
2170 bdp = tx_queue->dirty_tx; in gfar_clean_tx_ring()
2171 skb_dirtytx = tx_queue->skb_dirtytx; in gfar_clean_tx_ring()
2173 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { in gfar_clean_tx_ring()
2176 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in gfar_clean_tx_ring()
2177 priv->hwts_tx_en; in gfar_clean_tx_ring()
2179 frags = skb_shinfo(skb)->nr_frags; in gfar_clean_tx_ring()
2189 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); in gfar_clean_tx_ring()
2191 lstatus = be32_to_cpu(lbdp->lstatus); in gfar_clean_tx_ring()
2200 buflen = be16_to_cpu(next->length) + in gfar_clean_tx_ring()
2203 buflen = be16_to_cpu(bdp->length); in gfar_clean_tx_ring()
2205 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), in gfar_clean_tx_ring()
2212 ns = (__be64 *)(((uintptr_t)skb->data + 0x10) & ~0x7UL); in gfar_clean_tx_ring()
2226 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), in gfar_clean_tx_ring()
2227 be16_to_cpu(bdp->length), in gfar_clean_tx_ring()
2233 bytes_sent += GFAR_CB(skb)->bytes_sent; in gfar_clean_tx_ring()
2237 tx_queue->tx_skbuff[skb_dirtytx] = NULL; in gfar_clean_tx_ring()
2243 spin_lock(&tx_queue->txlock); in gfar_clean_tx_ring()
2244 tx_queue->num_txbdfree += nr_txbds; in gfar_clean_tx_ring()
2245 spin_unlock(&tx_queue->txlock); in gfar_clean_tx_ring()
2249 if (tx_queue->num_txbdfree && in gfar_clean_tx_ring()
2251 !(test_bit(GFAR_DOWN, &priv->state))) in gfar_clean_tx_ring()
2252 netif_wake_subqueue(priv->ndev, tqi); in gfar_clean_tx_ring()
2255 tx_queue->skb_dirtytx = skb_dirtytx; in gfar_clean_tx_ring()
2256 tx_queue->dirty_tx = bdp; in gfar_clean_tx_ring()
2264 struct net_device_stats *stats = &ndev->stats; in count_errors()
2265 struct gfar_extra_stats *estats = &priv->extra_stats; in count_errors()
2269 stats->rx_length_errors++; in count_errors()
2271 atomic64_inc(&estats->rx_trunc); in count_errors()
2277 stats->rx_length_errors++; in count_errors()
2280 atomic64_inc(&estats->rx_large); in count_errors()
2282 atomic64_inc(&estats->rx_short); in count_errors()
2285 stats->rx_frame_errors++; in count_errors()
2286 atomic64_inc(&estats->rx_nonoctet); in count_errors()
2289 atomic64_inc(&estats->rx_crcerr); in count_errors()
2290 stats->rx_crc_errors++; in count_errors()
2293 atomic64_inc(&estats->rx_overrun); in count_errors()
2294 stats->rx_over_errors++; in count_errors()
2304 ievent = gfar_read(&grp->regs->ievent); in gfar_receive()
2307 gfar_write(&grp->regs->ievent, IEVENT_FGPI); in gfar_receive()
2311 if (likely(napi_schedule_prep(&grp->napi_rx))) { in gfar_receive()
2312 spin_lock_irqsave(&grp->grplock, flags); in gfar_receive()
2313 imask = gfar_read(&grp->regs->imask); in gfar_receive()
2314 imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask; in gfar_receive()
2315 gfar_write(&grp->regs->imask, imask); in gfar_receive()
2316 spin_unlock_irqrestore(&grp->grplock, flags); in gfar_receive()
2317 __napi_schedule(&grp->napi_rx); in gfar_receive()
2322 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); in gfar_receive()
2335 if (likely(napi_schedule_prep(&grp->napi_tx))) { in gfar_transmit()
2336 spin_lock_irqsave(&grp->grplock, flags); in gfar_transmit()
2337 imask = gfar_read(&grp->regs->imask); in gfar_transmit()
2338 imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask; in gfar_transmit()
2339 gfar_write(&grp->regs->imask, imask); in gfar_transmit()
2340 spin_unlock_irqrestore(&grp->grplock, flags); in gfar_transmit()
2341 __napi_schedule(&grp->napi_tx); in gfar_transmit()
2346 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); in gfar_transmit()
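gfar_receive() and gfar_transmit() above both implement the usual NAPI handshake: from hard-IRQ context, mask the interrupt source in IMASK and schedule the poller; if a poll is already pending, just acknowledge the hardware event. A condensed kernel-style sketch (struct, register, and mask names are hypothetical):

```c
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#define RX_EVENTS 0x0000ff00u  /* hypothetical RX event/irq bits */

struct grp {
	spinlock_t lock;
	void __iomem *ievent;  /* event register, write-1-to-clear */
	void __iomem *imask;   /* interrupt mask register */
	struct napi_struct napi;
};

static irqreturn_t rx_irq(int irq, void *dev_id)
{
	struct grp *grp = dev_id;
	unsigned long flags;

	if (likely(napi_schedule_prep(&grp->napi))) {
		/* mask RX sources until the poll loop re-enables them */
		spin_lock_irqsave(&grp->lock, flags);
		iowrite32(ioread32(grp->imask) & ~RX_EVENTS, grp->imask);
		spin_unlock_irqrestore(&grp->lock, flags);
		__napi_schedule(&grp->napi);
	} else {
		/* poll already pending: just ack the hardware event */
		iowrite32(RX_EVENTS, grp->ievent);
	}
	return IRQ_HANDLED;
}
```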
2356 struct page *page = rxb->page; in gfar_add_rx_frag()
2363 size -= skb->len; in gfar_add_rx_frag()
2369 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, in gfar_add_rx_frag()
2370 rxb->page_offset + RXBUF_ALIGNMENT, in gfar_add_rx_frag()
2379 rxb->page_offset ^= GFAR_RXB_TRUESIZE; in gfar_add_rx_frag()
2390 u16 nta = rxq->next_to_alloc; in gfar_reuse_rx_page()
2392 new_rxb = &rxq->rx_buff[nta]; in gfar_reuse_rx_page()
2396 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; in gfar_reuse_rx_page()
2402 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, in gfar_reuse_rx_page()
2403 old_rxb->page_offset, in gfar_reuse_rx_page()
2410 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; in gfar_get_next_rxbuff()
2411 struct page *page = rxb->page; in gfar_get_next_rxbuff()
2415 void *buff_addr = page_address(page) + rxb->page_offset; in gfar_get_next_rxbuff()
2426 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, in gfar_get_next_rxbuff()
2434 dma_unmap_page(rx_queue->dev, rxb->dma, in gfar_get_next_rxbuff()
2439 rxb->page = NULL; in gfar_get_next_rxbuff()
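gfar_add_rx_frag() and gfar_reuse_rx_page() implement split-page recycling: each page holds two half-page buffers, and page_offset ^= GFAR_RXB_TRUESIZE flips between halves so the half still referenced by the network stack is never overwritten. A tiny sketch of the flip:

```c
#include <stdint.h>

#define HALF_PAGE 2048u   /* buffer truesize: half of a 4 KiB page */

struct rx_buff {
	void    *page;        /* backing page (opaque here) */
	uint32_t page_offset; /* 0 or HALF_PAGE */
};

static void *next_buffer(struct rx_buff *b)
{
	void *buf = (char *)b->page + b->page_offset;

	b->page_offset ^= HALF_PAGE;   /* flip to the other half */
	return buf;
}
```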
2450 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) == in gfar_rx_checksum()
2452 skb->ip_summed = CHECKSUM_UNNECESSARY; in gfar_rx_checksum()
2457 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2464 fcb = (struct rxfcb *)skb->data; in gfar_process_frame()
2469 if (priv->uses_rxfcb) in gfar_process_frame()
2473 if (priv->hwts_rx_en) { in gfar_process_frame()
2475 __be64 *ns = (__be64 *)skb->data; in gfar_process_frame()
2478 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); in gfar_process_frame()
2481 if (priv->padding) in gfar_process_frame()
2482 skb_pull(skb, priv->padding); in gfar_process_frame()
2485 pskb_trim(skb, skb->len - ETH_FCS_LEN); in gfar_process_frame()
2487 if (ndev->features & NETIF_F_RXCSUM) in gfar_process_frame()
2494 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && in gfar_process_frame()
2495 be16_to_cpu(fcb->flags) & RXFCB_VLN) in gfar_process_frame()
2497 be16_to_cpu(fcb->vlctl)); in gfar_process_frame()
2500 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2507 struct net_device *ndev = rx_queue->ndev; in gfar_clean_rx_ring()
2511 struct sk_buff *skb = rx_queue->skb; in gfar_clean_rx_ring()
2516 i = rx_queue->next_to_clean; in gfar_clean_rx_ring()
2518 while (rx_work_limit--) { in gfar_clean_rx_ring()
2526 bdp = &rx_queue->rx_bd_base[i]; in gfar_clean_rx_ring()
2527 lstatus = be32_to_cpu(bdp->lstatus); in gfar_clean_rx_ring()
2537 rx_queue->stats.rx_dropped++; in gfar_clean_rx_ring()
2553 if (unlikely(++i == rx_queue->rx_ring_size)) in gfar_clean_rx_ring()
2556 rx_queue->next_to_clean = i; in gfar_clean_rx_ring()
2568 rx_queue->stats.rx_dropped++; in gfar_clean_rx_ring()
2576 total_bytes += skb->len; in gfar_clean_rx_ring()
2578 skb_record_rx_queue(skb, rx_queue->qindex); in gfar_clean_rx_ring()
2580 skb->protocol = eth_type_trans(skb, ndev); in gfar_clean_rx_ring()
2583 napi_gro_receive(&rx_queue->grp->napi_rx, skb); in gfar_clean_rx_ring()
2589 rx_queue->skb = skb; in gfar_clean_rx_ring()
2591 rx_queue->stats.rx_packets += total_pkts; in gfar_clean_rx_ring()
2592 rx_queue->stats.rx_bytes += total_bytes; in gfar_clean_rx_ring()
2598 if (unlikely(priv->tx_actual_en)) { in gfar_clean_rx_ring()
2601 gfar_write(rx_queue->rfbptr, bdp_dma); in gfar_clean_rx_ring()
2611 struct gfar __iomem *regs = gfargrp->regs; in gfar_poll_rx_sq()
2612 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; in gfar_poll_rx_sq()
2618 gfar_write(&regs->ievent, IEVENT_RX_MASK); in gfar_poll_rx_sq()
2626 gfar_write(&regs->rstat, gfargrp->rstat); in gfar_poll_rx_sq()
2628 spin_lock_irq(&gfargrp->grplock); in gfar_poll_rx_sq()
2629 imask = gfar_read(&regs->imask); in gfar_poll_rx_sq()
2631 gfar_write(&regs->imask, imask); in gfar_poll_rx_sq()
2632 spin_unlock_irq(&gfargrp->grplock); in gfar_poll_rx_sq()
2642 struct gfar __iomem *regs = gfargrp->regs; in gfar_poll_tx_sq()
2643 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; in gfar_poll_tx_sq()
2649 gfar_write(&regs->ievent, IEVENT_TX_MASK); in gfar_poll_tx_sq()
2652 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) in gfar_poll_tx_sq()
2657 spin_lock_irq(&gfargrp->grplock); in gfar_poll_tx_sq()
2658 imask = gfar_read(&regs->imask); in gfar_poll_tx_sq()
2660 gfar_write(&regs->imask, imask); in gfar_poll_tx_sq()
2661 spin_unlock_irq(&gfargrp->grplock); in gfar_poll_tx_sq()
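gfar_poll_rx_sq() and gfar_poll_tx_sq() are the other half of that handshake: once the ring is cleaned under budget, napi_complete_done() retires the poller and the IMASK bits masked in the IRQ handler are restored. Continuing the hypothetical struct grp sketch from above (clean_rx_ring() stands in for the real ring cleaner):

```c
static int rx_poll(struct napi_struct *napi, int budget)
{
	struct grp *grp = container_of(napi, struct grp, napi);
	int work = clean_rx_ring(grp, budget);  /* hypothetical helper */

	if (work < budget && napi_complete_done(napi, work)) {
		/* re-enable the sources masked in the IRQ handler */
		spin_lock_irq(&grp->lock);
		iowrite32(ioread32(grp->imask) | RX_EVENTS, grp->imask);
		spin_unlock_irq(&grp->lock);
	}
	return work;
}
```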
2670 struct gfar __iomem *regs = gfargrp->regs; in gfar_error()
2671 struct gfar_private *priv = gfargrp->priv; in gfar_error()
2672 struct net_device *dev = priv->ndev; in gfar_error()
2675 u32 events = gfar_read(&regs->ievent); in gfar_error()
2678 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK); in gfar_error()
2681 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && in gfar_error()
2689 events, gfar_read(&regs->imask)); in gfar_error()
2693 dev->stats.tx_errors++; in gfar_error()
2696 dev->stats.tx_window_errors++; in gfar_error()
2698 dev->stats.tx_aborted_errors++; in gfar_error()
2702 dev->stats.tx_dropped++; in gfar_error()
2703 atomic64_inc(&priv->extra_stats.tx_underrun); in gfar_error()
2705 schedule_work(&priv->reset_task); in gfar_error()
2710 struct rmon_mib __iomem *rmon = &regs->rmon; in gfar_error()
2713 spin_lock(&priv->rmon_overflow.lock); in gfar_error()
2714 car = gfar_read(&rmon->car1) & CAR1_C1RDR; in gfar_error()
2716 priv->rmon_overflow.rdrp++; in gfar_error()
2717 gfar_write(&rmon->car1, car); in gfar_error()
2719 spin_unlock(&priv->rmon_overflow.lock); in gfar_error()
2722 dev->stats.rx_over_errors++; in gfar_error()
2723 atomic64_inc(&priv->extra_stats.rx_bsy); in gfar_error()
2726 gfar_read(&regs->rstat)); in gfar_error()
2729 dev->stats.rx_errors++; in gfar_error()
2730 atomic64_inc(&priv->extra_stats.rx_babr); in gfar_error()
2735 atomic64_inc(&priv->extra_stats.eberr); in gfar_error()
2742 atomic64_inc(&priv->extra_stats.tx_babt); in gfar_error()
2754 u32 events = gfar_read(&gfargrp->regs->ievent); in gfar_interrupt()
2772 /* Polling 'interrupt' - used by things like netconsole to send skbs
2773 * without having to re-enable interrupts. It's not called while
2782 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in gfar_netpoll()
2783 for (i = 0; i < priv->num_grps; i++) { in gfar_netpoll()
2784 struct gfar_priv_grp *grp = &priv->gfargrp[i]; in gfar_netpoll()
2786 disable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2787 disable_irq(gfar_irq(grp, RX)->irq); in gfar_netpoll()
2788 disable_irq(gfar_irq(grp, ER)->irq); in gfar_netpoll()
2789 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); in gfar_netpoll()
2790 enable_irq(gfar_irq(grp, ER)->irq); in gfar_netpoll()
2791 enable_irq(gfar_irq(grp, RX)->irq); in gfar_netpoll()
2792 enable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2795 for (i = 0; i < priv->num_grps; i++) { in gfar_netpoll()
2796 struct gfar_priv_grp *grp = &priv->gfargrp[i]; in gfar_netpoll()
2798 disable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2799 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); in gfar_netpoll()
2800 enable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2808 free_irq(gfar_irq(grp, TX)->irq, grp); in free_grp_irqs()
2809 free_irq(gfar_irq(grp, RX)->irq, grp); in free_grp_irqs()
2810 free_irq(gfar_irq(grp, ER)->irq, grp); in free_grp_irqs()
2815 struct gfar_private *priv = grp->priv; in register_grp_irqs()
2816 struct net_device *dev = priv->ndev; in register_grp_irqs()
2822 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in register_grp_irqs()
2826 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, in register_grp_irqs()
2827 gfar_irq(grp, ER)->name, grp); in register_grp_irqs()
2830 gfar_irq(grp, ER)->irq); in register_grp_irqs()
2834 enable_irq_wake(gfar_irq(grp, ER)->irq); in register_grp_irqs()
2836 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, in register_grp_irqs()
2837 gfar_irq(grp, TX)->name, grp); in register_grp_irqs()
2840 gfar_irq(grp, TX)->irq); in register_grp_irqs()
2843 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, in register_grp_irqs()
2844 gfar_irq(grp, RX)->name, grp); in register_grp_irqs()
2847 gfar_irq(grp, RX)->irq); in register_grp_irqs()
2850 enable_irq_wake(gfar_irq(grp, RX)->irq); in register_grp_irqs()
2853 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, in register_grp_irqs()
2854 gfar_irq(grp, TX)->name, grp); in register_grp_irqs()
2857 gfar_irq(grp, TX)->irq); in register_grp_irqs()
2860 enable_irq_wake(gfar_irq(grp, TX)->irq); in register_grp_irqs()
2866 free_irq(gfar_irq(grp, TX)->irq, grp); in register_grp_irqs()
2868 free_irq(gfar_irq(grp, ER)->irq, grp); in register_grp_irqs()
2879 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in gfar_free_irq()
2880 for (i = 0; i < priv->num_grps; i++) in gfar_free_irq()
2881 free_grp_irqs(&priv->gfargrp[i]); in gfar_free_irq()
2883 for (i = 0; i < priv->num_grps; i++) in gfar_free_irq()
2884 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, in gfar_free_irq()
2885 &priv->gfargrp[i]); in gfar_free_irq()
2893 for (i = 0; i < priv->num_grps; i++) { in gfar_request_irq()
2894 err = register_grp_irqs(&priv->gfargrp[i]); in gfar_request_irq()
2897 free_grp_irqs(&priv->gfargrp[j]); in gfar_request_irq()
2933 cancel_work_sync(&priv->reset_task); in gfar_close()
2937 phy_disconnect(dev->phydev); in gfar_close()
2959 * whenever dev->flags is changed
2965 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_set_multi()
2968 if (dev->flags & IFF_PROMISC) { in gfar_set_multi()
2970 tempval = gfar_read(&regs->rctrl); in gfar_set_multi()
2972 gfar_write(&regs->rctrl, tempval); in gfar_set_multi()
2975 tempval = gfar_read(&regs->rctrl); in gfar_set_multi()
2977 gfar_write(&regs->rctrl, tempval); in gfar_set_multi()
2980 if (dev->flags & IFF_ALLMULTI) { in gfar_set_multi()
2982 gfar_write(&regs->igaddr0, 0xffffffff); in gfar_set_multi()
2983 gfar_write(&regs->igaddr1, 0xffffffff); in gfar_set_multi()
2984 gfar_write(&regs->igaddr2, 0xffffffff); in gfar_set_multi()
2985 gfar_write(&regs->igaddr3, 0xffffffff); in gfar_set_multi()
2986 gfar_write(&regs->igaddr4, 0xffffffff); in gfar_set_multi()
2987 gfar_write(&regs->igaddr5, 0xffffffff); in gfar_set_multi()
2988 gfar_write(&regs->igaddr6, 0xffffffff); in gfar_set_multi()
2989 gfar_write(&regs->igaddr7, 0xffffffff); in gfar_set_multi()
2990 gfar_write(&regs->gaddr0, 0xffffffff); in gfar_set_multi()
2991 gfar_write(&regs->gaddr1, 0xffffffff); in gfar_set_multi()
2992 gfar_write(&regs->gaddr2, 0xffffffff); in gfar_set_multi()
2993 gfar_write(&regs->gaddr3, 0xffffffff); in gfar_set_multi()
2994 gfar_write(&regs->gaddr4, 0xffffffff); in gfar_set_multi()
2995 gfar_write(&regs->gaddr5, 0xffffffff); in gfar_set_multi()
2996 gfar_write(&regs->gaddr6, 0xffffffff); in gfar_set_multi()
2997 gfar_write(&regs->gaddr7, 0xffffffff); in gfar_set_multi()
3003 gfar_write(&regs->igaddr0, 0x0); in gfar_set_multi()
3004 gfar_write(&regs->igaddr1, 0x0); in gfar_set_multi()
3005 gfar_write(&regs->igaddr2, 0x0); in gfar_set_multi()
3006 gfar_write(&regs->igaddr3, 0x0); in gfar_set_multi()
3007 gfar_write(&regs->igaddr4, 0x0); in gfar_set_multi()
3008 gfar_write(&regs->igaddr5, 0x0); in gfar_set_multi()
3009 gfar_write(&regs->igaddr6, 0x0); in gfar_set_multi()
3010 gfar_write(&regs->igaddr7, 0x0); in gfar_set_multi()
3011 gfar_write(&regs->gaddr0, 0x0); in gfar_set_multi()
3012 gfar_write(&regs->gaddr1, 0x0); in gfar_set_multi()
3013 gfar_write(&regs->gaddr2, 0x0); in gfar_set_multi()
3014 gfar_write(&regs->gaddr3, 0x0); in gfar_set_multi()
3015 gfar_write(&regs->gaddr4, 0x0); in gfar_set_multi()
3016 gfar_write(&regs->gaddr5, 0x0); in gfar_set_multi()
3017 gfar_write(&regs->gaddr6, 0x0); in gfar_set_multi()
3018 gfar_write(&regs->gaddr7, 0x0); in gfar_set_multi()
3024 if (priv->extended_hash) { in gfar_set_multi()
3039 gfar_set_mac_for_addr(dev, idx, ha->addr); in gfar_set_multi()
3042 gfar_set_hash_for_addr(dev, ha->addr); in gfar_set_multi()
3049 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_reset()
3053 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); in gfar_mac_reset()
3058 /* the soft reset bit is not self-resetting, so we need to in gfar_mac_reset()
3061 gfar_write(&regs->maccfg1, 0); in gfar_mac_reset()
3068 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE); in gfar_mac_reset()
3069 gfar_write(&regs->mrblr, GFAR_RXB_SIZE); in gfar_mac_reset()
3072 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); in gfar_mac_reset()
3077 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 in gfar_mac_reset()
3084 gfar_write(&regs->maccfg2, tempval); in gfar_mac_reset()
3087 gfar_write(&regs->igaddr0, 0); in gfar_mac_reset()
3088 gfar_write(&regs->igaddr1, 0); in gfar_mac_reset()
3089 gfar_write(&regs->igaddr2, 0); in gfar_mac_reset()
3090 gfar_write(&regs->igaddr3, 0); in gfar_mac_reset()
3091 gfar_write(&regs->igaddr4, 0); in gfar_mac_reset()
3092 gfar_write(&regs->igaddr5, 0); in gfar_mac_reset()
3093 gfar_write(&regs->igaddr6, 0); in gfar_mac_reset()
3094 gfar_write(&regs->igaddr7, 0); in gfar_mac_reset()
3096 gfar_write(&regs->gaddr0, 0); in gfar_mac_reset()
3097 gfar_write(&regs->gaddr1, 0); in gfar_mac_reset()
3098 gfar_write(&regs->gaddr2, 0); in gfar_mac_reset()
3099 gfar_write(&regs->gaddr3, 0); in gfar_mac_reset()
3100 gfar_write(&regs->gaddr4, 0); in gfar_mac_reset()
3101 gfar_write(&regs->gaddr5, 0); in gfar_mac_reset()
3102 gfar_write(&regs->gaddr6, 0); in gfar_mac_reset()
3103 gfar_write(&regs->gaddr7, 0); in gfar_mac_reset()
3105 if (priv->extended_hash) in gfar_mac_reset()
3106 gfar_clear_exact_match(priv->ndev); in gfar_mac_reset()
3112 gfar_set_mac_address(priv->ndev); in gfar_mac_reset()
3114 gfar_set_multi(priv->ndev); in gfar_mac_reset()
3125 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_hw_init()
3136 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { in gfar_hw_init()
3137 memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1)); in gfar_hw_init()
3140 gfar_write(&regs->rmon.cam1, 0xffffffff); in gfar_hw_init()
3141 gfar_write(&regs->rmon.cam2, 0xffffffff); in gfar_hw_init()
3143 gfar_write(&regs->rmon.car1, 0xffffffff); in gfar_hw_init()
3144 gfar_write(&regs->rmon.car2, 0xffffffff); in gfar_hw_init()
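/* The RMON MIB carry registers are write-one-to-clear: setting every
 * bit in car1/car2 clears any pending carry events, while the all-ones
 * writes to cam1/cam2 mask the corresponding carry interrupts.
 */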
3148 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); in gfar_hw_init()
3151 attrs = ATTRELI_EL(priv->rx_stash_size) | in gfar_hw_init()
3152 ATTRELI_EI(priv->rx_stash_index); in gfar_hw_init()
3154 gfar_write(&regs->attreli, attrs); in gfar_hw_init()
3161 if (priv->bd_stash_en) in gfar_hw_init()
3164 if (priv->rx_stash_size != 0) in gfar_hw_init()
3167 gfar_write(&regs->attr, attrs); in gfar_hw_init()
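/* The two ifs above (bodies elided in this listing) OR stashing bits
 * into attrs, so that buffer descriptors, and optionally the first
 * rx_stash_size bytes of each frame, are stashed into the L2 cache as
 * they are DMAed in.
 */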
3170 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR); in gfar_hw_init()
3171 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); in gfar_hw_init()
3172 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); in gfar_hw_init()
3175 if (priv->num_grps > 1) in gfar_hw_init()
3202 struct device_node *np = ofdev->dev.of_node; in gfar_probe()
3213 priv->ndev = dev; in gfar_probe()
3214 priv->ofdev = ofdev; in gfar_probe()
3215 priv->dev = &ofdev->dev; in gfar_probe()
3216 SET_NETDEV_DEV(dev, &ofdev->dev); in gfar_probe()
3218 INIT_WORK(&priv->reset_task, gfar_reset_task); in gfar_probe()
3224 /* Set the dev->base_addr to the gfar reg region */ in gfar_probe()
3225 dev->base_addr = (unsigned long) priv->gfargrp[0].regs; in gfar_probe()
3228 dev->watchdog_timeo = TX_TIMEOUT; in gfar_probe()
3229 /* MTU range: 50 - 9586 */ in gfar_probe()
3230 dev->mtu = 1500; in gfar_probe()
3231 dev->min_mtu = 50; in gfar_probe()
3232 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN; in gfar_probe()
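/* With GFAR_JUMBO_FRAME_SIZE at 9600 and ETH_HLEN at 14, max_mtu works
 * out to the 9586 quoted in the range comment above.
 */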
3233 dev->netdev_ops = &gfar_netdev_ops; in gfar_probe()
3234 dev->ethtool_ops = &gfar_ethtool_ops; in gfar_probe()
3237 for (i = 0; i < priv->num_grps; i++) { in gfar_probe()
3238 netif_napi_add(dev, &priv->gfargrp[i].napi_rx, in gfar_probe()
3240 netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx, in gfar_probe()
3244 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { in gfar_probe()
3245 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | in gfar_probe()
3247 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | in gfar_probe()
3251 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { in gfar_probe()
3252 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | in gfar_probe()
3254 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; in gfar_probe()
3257 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in gfar_probe()
3264 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) in gfar_probe()
3265 priv->padding = 8 + DEFAULT_PADDING; in gfar_probe()
3267 if (dev->features & NETIF_F_IP_CSUM || in gfar_probe()
3268 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) in gfar_probe()
3269 dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN; in gfar_probe()
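/* Checksum offload and Tx timestamping both prepend per-frame metadata
 * (the frame control block plus a timestamp pad), so headroom for
 * GMAC_FCB_LEN + GMAC_TXPAL_LEN is reserved up front rather than
 * reallocating each skb in the transmit hot path.
 */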
3272 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_probe()
3273 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; in gfar_probe()
3274 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; in gfar_probe()
3275 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; in gfar_probe()
3276 priv->tx_queue[i]->txic = DEFAULT_TXIC; in gfar_probe()
3279 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_probe()
3280 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; in gfar_probe()
3281 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; in gfar_probe()
3282 priv->rx_queue[i]->rxic = DEFAULT_RXIC; in gfar_probe()
3286 priv->rx_filer_enable = in gfar_probe()
3287 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; in gfar_probe()
3289 priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1; in gfar_probe()
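/* (NETIF_MSG_IFUP << 1) - 1 is the usual mask idiom: it sets every
 * netif message class bit up to and including IFUP.
 */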
3291 if (priv->num_tx_queues == 1) in gfar_probe()
3292 priv->prio_sched_en = 1; in gfar_probe()
3294 set_bit(GFAR_DOWN, &priv->state); in gfar_probe()
3298 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { in gfar_probe()
3299 struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon; in gfar_probe()
3301 spin_lock_init(&priv->rmon_overflow.lock); in gfar_probe()
3302 priv->rmon_overflow.imask = IMASK_MSRO; in gfar_probe()
3303 gfar_write(&rmon->cam1, gfar_read(&rmon->cam1) & ~CAM1_M1RDR); in gfar_probe()
3312 pr_err("%s: Cannot register net device, aborting\n", dev->name); in gfar_probe()
3316 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) in gfar_probe()
3317 priv->wol_supported |= GFAR_WOL_MAGIC; in gfar_probe()
3319 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && in gfar_probe()
3320 priv->rx_filer_enable) in gfar_probe()
3321 priv->wol_supported |= GFAR_WOL_FILER_UCAST; in gfar_probe()
3323 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); in gfar_probe()
3326 for (i = 0; i < priv->num_grps; i++) { in gfar_probe()
3327 struct gfar_priv_grp *grp = &priv->gfargrp[i]; in gfar_probe()
3328 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in gfar_probe()
3329 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", in gfar_probe()
3330 dev->name, "_g", '0' + i, "_tx"); in gfar_probe()
3331 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", in gfar_probe()
3332 dev->name, "_g", '0' + i, "_rx"); in gfar_probe()
3333 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", in gfar_probe()
3334 dev->name, "_g", '0' + i, "_er"); in gfar_probe()
3336 strcpy(gfar_irq(grp, TX)->name, dev->name); in gfar_probe()
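/* With multiple interrupt groups this produces names like "eth0_g0_tx",
 * "eth0_g0_rx" and "eth0_g0_er" in /proc/interrupts; single-interrupt
 * devices simply reuse the netdev name.
 */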
3343 netdev_info(dev, "mac: %pM\n", dev->dev_addr); in gfar_probe()
3349 for (i = 0; i < priv->num_rx_queues; i++) in gfar_probe()
3351 i, priv->rx_queue[i]->rx_ring_size); in gfar_probe()
3352 for (i = 0; i < priv->num_tx_queues; i++) in gfar_probe()
3354 i, priv->tx_queue[i]->tx_ring_size); in gfar_probe()
3364 of_node_put(priv->phy_node); in gfar_probe()
3365 of_node_put(priv->tbi_node); in gfar_probe()
3373 struct device_node *np = ofdev->dev.of_node; in gfar_remove()
3375 of_node_put(priv->phy_node); in gfar_remove()
3376 of_node_put(priv->tbi_node); in gfar_remove()
3378 unregister_netdev(priv->ndev); in gfar_remove()
3393 struct gfar __iomem *regs = priv->gfargrp[0].regs; in __gfar_filer_disable()
3396 temp = gfar_read(&regs->rctrl); in __gfar_filer_disable()
3398 gfar_write(&regs->rctrl, temp); in __gfar_filer_disable()
3403 struct gfar __iomem *regs = priv->gfargrp[0].regs; in __gfar_filer_enable()
3406 temp = gfar_read(&regs->rctrl); in __gfar_filer_enable()
3408 gfar_write(&regs->rctrl, temp); in __gfar_filer_enable()
3425 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { in gfar_filer_config_wol()
3427 struct net_device *ndev = priv->ndev; in gfar_filer_config_wol()
3429 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; in gfar_filer_config_wol()
3430 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | in gfar_filer_config_wol()
3431 (ndev->dev_addr[1] << 8) | in gfar_filer_config_wol()
3432 ndev->dev_addr[2]; in gfar_filer_config_wol()
3439 dest_mac_addr = (ndev->dev_addr[3] << 16) | in gfar_filer_config_wol()
3440 (ndev->dev_addr[4] << 8) | in gfar_filer_config_wol()
3441 ndev->dev_addr[5]; in gfar_filer_config_wol()
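/* The destination MAC is matched 24 bits at a time: the upper three
 * bytes against the DAH filer property and the lower three against
 * DAL, chained into one rule that steers the wake-up frame to qindex
 * and raises the filer general-purpose interrupt.
 */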
3458 rqfcr = priv->ftp_rqfcr[i]; in gfar_filer_restore_table()
3459 rqfpr = priv->ftp_rqfpr[i]; in gfar_filer_restore_table()
3469 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_start_wol_filer()
3474 gfar_write(&regs->rqueue, priv->rqueue); in gfar_start_wol_filer()
3477 tempval = gfar_read(&regs->dmactrl); in gfar_start_wol_filer()
3479 gfar_write(&regs->dmactrl, tempval); in gfar_start_wol_filer()
3482 tempval = gfar_read(&regs->dmactrl); in gfar_start_wol_filer()
3484 gfar_write(&regs->dmactrl, tempval); in gfar_start_wol_filer()
3486 for (i = 0; i < priv->num_grps; i++) { in gfar_start_wol_filer()
3487 regs = priv->gfargrp[i].regs; in gfar_start_wol_filer()
3489 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); in gfar_start_wol_filer()
3491 gfar_write(&regs->imask, IMASK_FGPI); in gfar_start_wol_filer()
3495 tempval = gfar_read(&regs->maccfg1); in gfar_start_wol_filer()
3497 gfar_write(&regs->maccfg1, tempval); in gfar_start_wol_filer()
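/* Sequence above, with the elided read-modify-write middles summarized:
 * restore the Rx queue mapping, enable DMA and clear the graceful-stop
 * bits, rearm rstat, unmask only the filer general-purpose interrupt
 * (IMASK_FGPI), and finally enable just the Rx side of the MAC, leaving
 * Tx stopped while the host sleeps.
 */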
3503 struct net_device *ndev = priv->ndev; in gfar_suspend()
3504 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_suspend()
3506 u16 wol = priv->wol_opts; in gfar_suspend()
3520 gfar_write(&regs->imask, IMASK_MAG); in gfar_suspend()
3523 tempval = gfar_read(&regs->maccfg2); in gfar_suspend()
3525 gfar_write(&regs->maccfg2, tempval); in gfar_suspend()
3527 /* re-enable the Rx block */ in gfar_suspend()
3528 tempval = gfar_read(&regs->maccfg1); in gfar_suspend()
3530 gfar_write(&regs->maccfg1, tempval); in gfar_suspend()
3537 phy_stop(ndev->phydev); in gfar_suspend()
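/* Three suspend flavours: magic-packet wake keeps the Rx block alive
 * with only IMASK_MAG unmasked, filer-based unicast wake defers to
 * gfar_start_wol_filer(), and with no wake-up source armed the PHY is
 * simply stopped.
 */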
3546 struct net_device *ndev = priv->ndev; in gfar_resume()
3547 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_resume()
3549 u16 wol = priv->wol_opts; in gfar_resume()
3556 tempval = gfar_read(&regs->maccfg2); in gfar_resume()
3558 gfar_write(&regs->maccfg2, tempval); in gfar_resume()
3566 phy_start(ndev->phydev); in gfar_resume()
3580 struct net_device *ndev = priv->ndev; in gfar_restore()
3596 priv->oldlink = 0; in gfar_restore()
3597 priv->oldspeed = 0; in gfar_restore()
3598 priv->oldduplex = -1; in gfar_restore()
3600 if (ndev->phydev) in gfar_restore()
3601 phy_start(ndev->phydev); in gfar_restore()
3612 .freeze = gfar_suspend,
3641 .name = "fsl-gianfar",
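/* For context, the PM callbacks above are typically gathered into a
 * dev_pm_ops table along these lines (a sketch; the member list is
 * inferred from the .freeze hook shown above and the suspend/resume/
 * restore handlers earlier in this section):
 */
static const struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};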