author    Linus Torvalds <torvalds@linux-foundation.org>  2018-03-22 14:10:29 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-03-22 14:10:29 -0700
commit    c4f4d2f917729e9b7b8bb452bf4971be93e7a15f (patch)
tree      691950e6096068c9909f6864ba1cd0f20de91cad
parent    9ce207880d8e4b20b6c1bcd82c749970c2b9e6d2 (diff)
parent    5dcd8400884cc4a043a6d4617e042489e5d566a9 (diff)
download  mm-c4f4d2f917729e9b7b8bb452bf4971be93e7a15f.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Always validate XFRM esn replay attribute, from Florian Westphal.

 2) Fix RCU read lock imbalance in xfrm_get_tos(), from Xin Long.

 3) Don't try to get firmware dump if not loaded in iwlwifi, from Shaul
    Triebitz.

 4) Fix BPF helpers to deal with SCTP GSO SKBs properly, from Daniel
    Axtens.

 5) Fix some interrupt handling issues in e1000e driver, from Benjamin
    Poitier.

 6) Use strlcpy() in several ethtool get_strings methods, from Florian
    Fainelli.

 7) Fix rhlist dup insertion, from Paul Blakey.

 8) Fix SKB leak in netem packet scheduler, from Alexey Kodanev.

 9) Fix driver unload crash when link is up in smsc911x, from Jeremy
    Linton.

10) Purge out invalid socket types in l2tp_tunnel_create(), from Eric
    Dumazet.

11) Need to purge the write queue when TCP connections are aborted,
    otherwise userspace using MSG_ZEROCOPY can't close the fd. From
    Soheil Hassas Yeganeh.

12) Fix double free in error path of team driver, from Arkadi
    Sharshevsky.

13) Filter fixes for hv_netvsc driver, from Stephen Hemminger.

14) Fix non-linear packet access in ipv6 ndisc code, from Lorenzo
    Bianconi.

15) Properly filter out unsupported feature flags in macvlan driver,
    from Shannon Nelson.

16) Don't request loading the diag module for a protocol if the
    protocol itself is not even registered. From Xin Long.

17) If datagram connect fails in ipv6, make sure the socket state is
    consistent afterwards. From Paolo Abeni.

18) Use after free in qed driver, from Dan Carpenter.

19) If received ipv4 PMTU is less than the min pmtu, lock the mtu in
    the entry. From Sabrina Dubroca.

20) Fix sleep in atomic in tg3 driver, from Jonathan Toppins.

21) Fix vlan in vlan untagging in some situations, from Toshiaki
    Makita.

22) Fix double SKB free in genlmsg_mcast(). From Nicolas Dichtel.

23) Fix NULL derefs in error paths of tcf_*_init(), from Davide
    Caratti.

24) Unbalanced PM runtime calls in FEC driver, from Florian Fainelli.

25) Memory leak in gemini driver, from Igor Pylypiv.

26) IDR leaks in error paths of tcf_*_init() functions, from Davide
    Caratti.

27) Need to use GFP_ATOMIC in seg6_build_state(), from David Lebrun.

28) Missing dev_put() in error path of macsec_newlink(), from Dan
    Carpenter.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (201 commits)
  macsec: missing dev_put() on error in macsec_newlink()
  net: dsa: Fix functional dsa-loop dependency on FIXED_PHY
  hv_netvsc: common detach logic
  hv_netvsc: change GPAD teardown order on older versions
  hv_netvsc: use RCU to fix concurrent rx and queue changes
  hv_netvsc: disable NAPI before channel close
  net/ipv6: Handle onlink flag with multipath routes
  ppp: avoid loop in xmit recursion detection code
  ipv6: sr: fix NULL pointer dereference when setting encap source address
  ipv6: sr: fix scheduling in RCU when creating seg6 lwtunnel state
  net: aquantia: driver version bump
  net: aquantia: Implement pci shutdown callback
  net: aquantia: Allow live mac address changes
  net: aquantia: Add tx clean budget and valid budget handling logic
  net: aquantia: Change inefficient wait loop on fw data reads
  net: aquantia: Fix a regression with reset on old firmware
  net: aquantia: Fix hardware reset when SPI may rarely hangup
  s390/qeth: on channel error, reject further cmd requests
  s390/qeth: lock read device while queueing next buffer
  s390/qeth: when thread completes, wake up all waiters
  ...
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/marvell.txt | 48
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,ravb.txt | 6
-rw-r--r--  Documentation/networking/segmentation-offloads.txt | 18
-rw-r--r--  Makefile | 9
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 3
-rw-r--r--  drivers/bluetooth/btusb.c | 8
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 13
-rw-r--r--  drivers/net/can/cc770/cc770.c | 100
-rw-r--r--  drivers/net/can/cc770/cc770.h | 2
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 75
-rw-r--r--  drivers/net/can/m_can/m_can.c | 7
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 25
-rw-r--r--  drivers/net/can/peak_canfd/peak_pciefd_main.c | 8
-rw-r--r--  drivers/net/dsa/Makefile | 5
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 4
-rw-r--r--  drivers/net/ethernet/8390/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 22
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 15
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 11
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 66
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/ver.h | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_rockchip.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 180
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 42
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 25
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/natsemi/Makefile | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 17
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c | 23
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 4
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 3
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 2
-rw-r--r--  drivers/net/hyperv/netvsc.c | 52
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 293
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 68
-rw-r--r--  drivers/net/macsec.c | 5
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/phy/bcm-phy-lib.c | 4
-rw-r--r--  drivers/net/phy/marvell.c | 4
-rw-r--r--  drivers/net/phy/micrel.c | 27
-rw-r--r--  drivers/net/phy/phy.c | 145
-rw-r--r--  drivers/net/phy/phy_device.c | 32
-rw-r--r--  drivers/net/phy/realtek.c | 2
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 26
-rw-r--r--  drivers/net/team/team.c | 4
-rw-r--r--  drivers/net/tun.c | 3
-rw-r--r--  drivers/net/usb/usbnet.c | 10
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 16
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 1
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 10
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Kconfig | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/debugfs.h | 18
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/init.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 49
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 28
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 39
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 3
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 21
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 2
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 2
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 28
-rw-r--r--  drivers/vhost/net.c | 8
-rw-r--r--  fs/sysfs/symlink.c | 1
-rw-r--r--  include/linux/cgroup-defs.h | 4
-rw-r--r--  include/linux/if_tun.h | 4
-rw-r--r--  include/linux/if_vlan.h | 66
-rw-r--r--  include/linux/net.h | 1
-rw-r--r--  include/linux/netfilter/x_tables.h | 2
-rw-r--r--  include/linux/phy.h | 5
-rw-r--r--  include/linux/rhashtable.h | 4
-rw-r--r--  include/linux/skbuff.h | 22
-rw-r--r--  include/linux/u64_stats_sync.h | 22
-rw-r--r--  include/net/ip.h | 11
-rw-r--r--  include/net/ip6_route.h | 3
-rw-r--r--  include/net/ip_fib.h | 1
-rw-r--r--  include/net/mac80211.h | 4
-rw-r--r--  include/net/route.h | 6
-rw-r--r--  include/net/sch_generic.h | 19
-rw-r--r--  include/net/sock.h | 1
-rw-r--r--  include/uapi/linux/if_ether.h | 1
-rw-r--r--  kernel/bpf/syscall.c | 2
-rw-r--r--  kernel/fail_function.c | 10
-rw-r--r--  kernel/trace/bpf_trace.c | 68
-rw-r--r--  lib/rhashtable.c | 4
-rw-r--r--  lib/test_bpf.c | 2
-rw-r--r--  lib/test_rhashtable.c | 134
-rw-r--r--  net/8021q/vlan_core.c | 4
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 2
-rw-r--r--  net/batman-adv/icmp_socket.c | 1
-rw-r--r--  net/batman-adv/log.c | 1
-rw-r--r--  net/batman-adv/multicast.c | 4
-rw-r--r--  net/batman-adv/routing.c | 25
-rw-r--r--  net/bluetooth/smp.c | 8
-rw-r--r--  net/bridge/netfilter/ebt_among.c | 34
-rw-r--r--  net/bridge/netfilter/ebtables.c | 6
-rw-r--r--  net/core/dev.c | 22
-rw-r--r--  net/core/dev_ioctl.c | 7
-rw-r--r--  net/core/devlink.c | 16
-rw-r--r--  net/core/filter.c | 60
-rw-r--r--  net/core/skbuff.c | 11
-rw-r--r--  net/core/sock.c | 21
-rw-r--r--  net/core/sock_diag.c | 12
-rw-r--r--  net/dccp/proto.c | 5
-rw-r--r--  net/dsa/legacy.c | 2
-rw-r--r--  net/ieee802154/6lowpan/core.c | 12
-rw-r--r--  net/ipv4/inet_diag.c | 3
-rw-r--r--  net/ipv4/inet_fragment.c | 3
-rw-r--r--  net/ipv4/ip_sockglue.c | 6
-rw-r--r--  net/ipv4/route.c | 47
-rw-r--r--  net/ipv4/tcp.c | 1
-rw-r--r--  net/ipv4/tcp_timer.c | 1
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c | 3
-rw-r--r--  net/ipv4/xfrm4_policy.c | 5
-rw-r--r--  net/ipv6/datagram.c | 21
-rw-r--r--  net/ipv6/ip6_gre.c | 8
-rw-r--r--  net/ipv6/ndisc.c | 3
-rw-r--r--  net/ipv6/route.c | 76
-rw-r--r--  net/ipv6/seg6_iptunnel.c | 7
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c | 3
-rw-r--r--  net/ipv6/xfrm6_policy.c | 5
-rw-r--r--  net/iucv/af_iucv.c | 4
-rw-r--r--  net/kcm/kcmsock.c | 33
-rw-r--r--  net/l2tp/l2tp_core.c | 46
-rw-r--r--  net/l2tp/l2tp_core.h | 3
-rw-r--r--  net/mac80211/debugfs.c | 1
-rw-r--r--  net/mac80211/mlme.c | 3
-rw-r--r--  net/netfilter/nf_tables_api.c | 1
-rw-r--r--  net/netfilter/nft_set_hash.c | 2
-rw-r--r--  net/netfilter/x_tables.c | 30
-rw-r--r--  net/netfilter/xt_hashlimit.c | 16
-rw-r--r--  net/netfilter/xt_recent.c | 6
-rw-r--r--  net/netlink/genetlink.c | 2
-rw-r--r--  net/openvswitch/meter.c | 12
-rw-r--r--  net/sched/act_bpf.c | 2
-rw-r--r--  net/sched/act_csum.c | 5
-rw-r--r--  net/sched/act_ipt.c | 9
-rw-r--r--  net/sched/act_pedit.c | 2
-rw-r--r--  net/sched/act_police.c | 2
-rw-r--r--  net/sched/act_sample.c | 3
-rw-r--r--  net/sched/act_simple.c | 2
-rw-r--r--  net/sched/act_skbmod.c | 5
-rw-r--r--  net/sched/act_tunnel_key.c | 10
-rw-r--r--  net/sched/act_vlan.c | 5
-rw-r--r--  net/sched/sch_generic.c | 22
-rw-r--r--  net/sched/sch_netem.c | 2
-rw-r--r--  net/sctp/input.c | 8
-rw-r--r--  net/sctp/inqueue.c | 2
-rw-r--r--  net/sctp/offload.c | 2
-rw-r--r--  net/smc/af_smc.c | 4
-rw-r--r--  net/smc/smc_close.c | 25
-rw-r--r--  net/socket.c | 5
-rw-r--r--  net/xfrm/xfrm_ipcomp.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 13
-rw-r--r--  net/xfrm/xfrm_replay.c | 2
-rw-r--r--  net/xfrm/xfrm_state.c | 5
-rw-r--r--  net/xfrm/xfrm_user.c | 21
-rw-r--r--  tools/bpf/bpftool/common.c | 4
211 files changed, 2086 insertions, 1152 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 1d4d0f49c9d06e..8c033d48e2baf0 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -50,14 +50,15 @@ Example:
compatible = "marvell,mv88e6085";
reg = <0>;
reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
- };
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- switch1phy0: switch1phy0@0 {
- reg = <0>;
- interrupt-parent = <&switch0>;
- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch1phy0: switch1phy0@0 {
+ reg = <0>;
+ interrupt-parent = <&switch0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
};
};
@@ -74,23 +75,24 @@ Example:
compatible = "marvell,mv88e6390";
reg = <0>;
reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
- };
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- switch1phy0: switch1phy0@0 {
- reg = <0>;
- interrupt-parent = <&switch0>;
- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch1phy0: switch1phy0@0 {
+ reg = <0>;
+ interrupt-parent = <&switch0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
- };
- mdio1 {
- compatible = "marvell,mv88e6xxx-mdio-external";
- #address-cells = <1>;
- #size-cells = <0>;
- switch1phy9: switch1phy0@9 {
- reg = <9>;
+ mdio1 {
+ compatible = "marvell,mv88e6xxx-mdio-external";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch1phy9: switch1phy0@9 {
+ reg = <9>;
+ };
};
};
};
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 92fd4b2f17b24b..b4dc455eb1554e 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -27,7 +27,11 @@ Required properties:
SoC-specific version corresponding to the platform first followed by
the generic version.
-- reg: offset and length of (1) the register block and (2) the stream buffer.
+- reg: Offset and length of (1) the register block and (2) the stream buffer.
+ The region for the register block is mandatory.
+ The region for the stream buffer is optional, as it is only present on
+ R-Car Gen2 and RZ/G1 SoCs, and on R-Car H3 (R8A7795), M3-W (R8A7796),
+ and M3-N (R8A77965).
- interrupts: A list of interrupt-specifiers, one for each entry in
interrupt-names.
If interrupt-names is not present, an interrupt specifier
diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.txt
index d47480b61ac6d0..aca542ec125c96 100644
--- a/Documentation/networking/segmentation-offloads.txt
+++ b/Documentation/networking/segmentation-offloads.txt
@@ -20,8 +20,8 @@ TCP Segmentation Offload
TCP segmentation allows a device to segment a single frame into multiple
frames with a data payload size specified in skb_shinfo()->gso_size.
-When TCP segmentation requested the bit for either SKB_GSO_TCP or
-SKB_GSO_TCP6 should be set in skb_shinfo()->gso_type and
+When TCP segmentation requested the bit for either SKB_GSO_TCPV4 or
+SKB_GSO_TCPV6 should be set in skb_shinfo()->gso_type and
skb_shinfo()->gso_size should be set to a non-zero value.
TCP segmentation is dependent on support for the use of partial checksum
@@ -153,8 +153,18 @@ To signal this, gso_size is set to the special value GSO_BY_FRAGS.
Therefore, any code in the core networking stack must be aware of the
possibility that gso_size will be GSO_BY_FRAGS and handle that case
-appropriately. (For size checks, the skb_gso_validate_*_len family of
-helpers do this automatically.)
+appropriately.
+
+There are some helpers to make this easier:
+
+ - skb_is_gso(skb) && skb_is_gso_sctp(skb) is the best way to see if
+ an skb is an SCTP GSO skb.
+
+ - For size checks, the skb_gso_validate_*_len family of helpers correctly
+ considers GSO_BY_FRAGS.
+
+ - For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size
+ will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.
This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
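
For illustration only, a minimal sketch of how a size check in the core stack can combine the helpers named in the documentation hunk above. The function name and the plain skb->len comparison are assumptions made for this example; skb_gso_validate_network_len() is one member of the skb_gso_validate_*_len family referred to above, and none of this code is part of the patch itself:

/* Illustrative only: check whether a packet (GSO or not) fits a given
 * network-layer MTU without doing arithmetic on gso_size, which may hold
 * the sentinel GSO_BY_FRAGS for SCTP GSO skbs (see skb_is_gso_sctp()).
 */
#include <linux/skbuff.h>

static bool example_fits_network_mtu(const struct sk_buff *skb,
				     unsigned int mtu)
{
	if (!skb_is_gso(skb))
		return skb->len <= mtu;

	/* The validate helpers walk the frag list when gso_size is
	 * GSO_BY_FRAGS, so they stay correct for SCTP GSO skbs. */
	return skb_gso_validate_network_len(skb, mtu);
}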
diff --git a/Makefile b/Makefile
index d65e2e22901750..486db374d1c1ad 100644
--- a/Makefile
+++ b/Makefile
@@ -826,6 +826,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
+# clang sets -fmerge-all-constants by default as optimization, but this
+# is non-conforming behavior for C and in fact breaks the kernel, so we
+# need to disable it here generally.
+KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants)
+
+# for gcc -fno-merge-all-constants disables everything, but it is fine
+# to have actual conforming behavior enabled.
+KBUILD_CFLAGS += $(call cc-option,-fmerge-constants)
+
# Make sure -fstack-check isn't enabled (like gentoo apparently did)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
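
As a user-space aside (not part of this patch), the non-conforming behavior the Makefile comment above refers to can be seen with a small program: C requires the two arrays below to be distinct objects with distinct addresses, but -fmerge-all-constants allows the compiler to fold identical constant-initialized objects together.

/* Build normally and then with -fmerge-all-constants; with merging the
 * equality test below may become true, which conforming C forbids. */
#include <stdio.h>

static const char a[] = "constant";
static const char b[] = "constant";

int main(void)
{
	printf("%s\n", (const void *)a == (const void *)b ? "merged" : "distinct");
	return 0;
}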
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 45e4eb5bcbb2ab..ce5b2ebd57015d 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1188,7 +1188,7 @@ skip_init_addrs:
* may converge on the last pass. In such case do one more
* pass to emit the final image
*/
- for (pass = 0; pass < 10 || image; pass++) {
+ for (pass = 0; pass < 20 || image; pass++) {
proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
if (proglen <= 0) {
image = NULL;
@@ -1215,6 +1215,7 @@ skip_init_addrs:
}
}
oldproglen = proglen;
+ cond_resched();
}
if (bpf_jit_enable > 1)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 60bf04b8f1034c..366a49c7c08f22 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -231,7 +231,6 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@@ -264,6 +263,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* QCA ROME chipset */
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
@@ -386,10 +386,10 @@ static const struct usb_device_id blacklist_table[] = {
*/
static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
{
- /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+ /* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
},
},
{}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 6314dfb02969a9..40b9fb24701016 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -244,7 +244,9 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
bt_dev_dbg(bdev, "Host wake IRQ");
- pm_request_resume(bdev->dev);
+ pm_runtime_get(bdev->dev);
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
return IRQ_HANDLED;
}
@@ -301,7 +303,7 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
.usb_auto_sleep = 0,
.usb_resume_timeout = 0,
.break_to_host = 0,
- .pulsed_host_wake = 0,
+ .pulsed_host_wake = 1,
};
static int bcm_setup_sleep(struct hci_uart *hu)
@@ -586,8 +588,11 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
} else if (!bcm->rx_skb) {
/* Delay auto-suspend when receiving completed packet */
mutex_lock(&bcm_device_lock);
- if (bcm->dev && bcm_device_exists(bcm->dev))
- pm_request_resume(bcm->dev->dev);
+ if (bcm->dev && bcm_device_exists(bcm->dev)) {
+ pm_runtime_get(bcm->dev->dev);
+ pm_runtime_mark_last_busy(bcm->dev->dev);
+ pm_runtime_put_autosuspend(bcm->dev->dev);
+ }
mutex_unlock(&bcm_device_lock);
}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 1e37313054f395..6da69af103e60d 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,
return 0;
}
-static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static void cc770_tx(struct net_device *dev, int mo)
{
struct cc770_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
- unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
u8 dlc, rtr;
u32 id;
int i;
- if (can_dropped_invalid_skb(dev, skb))
- return NETDEV_TX_OK;
-
- if ((cc770_read_reg(priv,
- msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
- netdev_err(dev, "TX register is still occupied!\n");
- return NETDEV_TX_BUSY;
- }
-
- netif_stop_queue(dev);
-
dlc = cf->can_dlc;
id = cf->can_id;
- if (cf->can_id & CAN_RTR_FLAG)
- rtr = 0;
- else
- rtr = MSGCFG_DIR;
+ rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
cc770_write_reg(priv, msgobj[mo].ctrl1,
RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
+
if (id & CAN_EFF_FLAG) {
id &= CAN_EFF_MASK;
cc770_write_reg(priv, msgobj[mo].config,
@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < dlc; i++)
cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
- /* Store echo skb before starting the transfer */
- can_put_echo_skb(skb, dev, 0);
-
cc770_write_reg(priv, msgobj[mo].ctrl1,
- RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+ RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
+}
- stats->tx_bytes += dlc;
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
- /*
- * HM: We had some cases of repeated IRQs so make sure the
- * INT is acknowledged I know it's already further up, but
- * doing again fixed the issue
- */
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+ netif_stop_queue(dev);
+
+ if ((cc770_read_reg(priv,
+ msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+ netdev_err(dev, "TX register is still occupied!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ priv->tx_skb = skb;
+ cc770_tx(dev, mo);
return NETDEV_TX_OK;
}
@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
struct cc770_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
unsigned int mo = obj2msgobj(o);
+ struct can_frame *cf;
+ u8 ctrl1;
+
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
- /* Nothing more to send, switch off interrupts */
cc770_write_reg(priv, msgobj[mo].ctrl0,
MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
- /*
- * We had some cases of repeated IRQ so make sure the
- * INT is acknowledged
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
+
+ if (unlikely(!priv->tx_skb)) {
+ netdev_err(dev, "missing tx skb in tx interrupt\n");
+ return;
+ }
+
+ if (unlikely(ctrl1 & MSGLST_SET)) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+
+ /* When the CC770 is sending an RTR message and it receives a regular
+ * message that matches the id of the RTR message, it will overwrite the
+ * outgoing message in the TX register. When this happens we must
+ * process the received message and try to transmit the outgoing skb
+ * again.
*/
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+ if (unlikely(ctrl1 & NEWDAT_SET)) {
+ cc770_rx(dev, mo, ctrl1);
+ cc770_tx(dev, mo);
+ return;
+ }
+ cf = (struct can_frame *)priv->tx_skb->data;
+ stats->tx_bytes += cf->can_dlc;
stats->tx_packets++;
+
+ can_put_echo_skb(priv->tx_skb, dev, 0);
can_get_echo_skb(dev, 0);
+ priv->tx_skb = NULL;
+
netif_wake_queue(dev);
}
@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)
priv->can.do_set_bittiming = cc770_set_bittiming;
priv->can.do_set_mode = cc770_set_mode;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->tx_skb = NULL;
memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index a1739db98d911f..95752e1d128397 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -193,6 +193,8 @@ struct cc770_priv {
u8 cpu_interface; /* CPU interface register */
u8 clkout; /* Clock out register */
u8 bus_config; /* Bus conffiguration register */
+
+ struct sk_buff *tx_skb;
};
struct net_device *alloc_cc770dev(int sizeof_priv);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 2772d05ff11caa..fedd927ba6ed99 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -30,6 +30,7 @@
#define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2)
#define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3)
#define IFI_CANFD_STCMD_BUSOFF BIT(4)
+#define IFI_CANFD_STCMD_ERROR_WARNING BIT(5)
#define IFI_CANFD_STCMD_BUSMONITOR BIT(16)
#define IFI_CANFD_STCMD_LOOPBACK BIT(18)
#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24)
@@ -52,7 +53,10 @@
#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13)
#define IFI_CANFD_INTERRUPT 0xc
+#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF BIT(0)
#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1)
+#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG BIT(2)
+#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC BIT(3)
#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10)
#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16)
#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22)
@@ -61,6 +65,10 @@
#define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31))
#define IFI_CANFD_IRQMASK 0x10
+#define IFI_CANFD_IRQMASK_ERROR_BUSOFF BIT(0)
+#define IFI_CANFD_IRQMASK_ERROR_WARNING BIT(1)
+#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG BIT(2)
+#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC BIT(3)
#define IFI_CANFD_IRQMASK_SET_ERR BIT(7)
#define IFI_CANFD_IRQMASK_SET_TS BIT(15)
#define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16)
@@ -136,6 +144,8 @@
#define IFI_CANFD_SYSCLOCK 0x50
#define IFI_CANFD_VER 0x54
+#define IFI_CANFD_VER_REV_MASK 0xff
+#define IFI_CANFD_VER_REV_MIN_SUPPORTED 0x15
#define IFI_CANFD_IP_ID 0x58
#define IFI_CANFD_IP_ID_VALUE 0xD073CAFD
@@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
if (enable) {
enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
- IFI_CANFD_IRQMASK_RXFIFO_NEMPTY;
+ IFI_CANFD_IRQMASK_RXFIFO_NEMPTY |
+ IFI_CANFD_IRQMASK_ERROR_STATE_CHG |
+ IFI_CANFD_IRQMASK_ERROR_WARNING |
+ IFI_CANFD_IRQMASK_ERROR_BUSOFF;
if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
}
@@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
return 1;
}
-static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr)
+static int ifi_canfd_handle_lec_err(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
+ u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
@@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
switch (new_state) {
case CAN_STATE_ERROR_ACTIVE:
+ /* error active state */
+ priv->can.can_stats.error_warning++;
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ break;
+ case CAN_STATE_ERROR_WARNING:
/* error warning state */
priv->can.can_stats.error_warning++;
priv->can.state = CAN_STATE_ERROR_WARNING;
@@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
ifi_canfd_get_berr_counter(ndev, &bec);
switch (new_state) {
- case CAN_STATE_ERROR_ACTIVE:
+ case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (bec.txerr > bec.rxerr) ?
@@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
return 1;
}
-static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd)
+static int ifi_canfd_handle_state_errors(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
int work_done = 0;
- u32 isr;
- /*
- * The ErrWarn condition is a little special, since the bit is
- * located in the INTERRUPT register instead of STCMD register.
- */
- isr = readl(priv->base + IFI_CANFD_INTERRUPT);
- if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) &&
+ if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) &&
+ (priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
+ netdev_dbg(ndev, "Error, entered active state\n");
+ work_done += ifi_canfd_handle_state_change(ndev,
+ CAN_STATE_ERROR_ACTIVE);
+ }
+
+ if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) &&
(priv->can.state != CAN_STATE_ERROR_WARNING)) {
- /* Clear the interrupt */
- writel(IFI_CANFD_INTERRUPT_ERROR_WARNING,
- priv->base + IFI_CANFD_INTERRUPT);
netdev_dbg(ndev, "Error, entered warning state\n");
work_done += ifi_canfd_handle_state_change(ndev,
CAN_STATE_ERROR_WARNING);
@@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
{
struct net_device *ndev = napi->dev;
struct ifi_canfd_priv *priv = netdev_priv(ndev);
- const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE |
- IFI_CANFD_STCMD_BUSOFF;
- int work_done = 0;
-
- u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
- u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
+ int work_done = 0;
/* Handle bus state changes */
- if ((stcmd & stcmd_state_mask) ||
- ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0))
- work_done += ifi_canfd_handle_state_errors(ndev, stcmd);
+ work_done += ifi_canfd_handle_state_errors(ndev);
/* Handle lost messages on RX */
if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
@@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
/* Handle lec errors on the bus */
if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
- work_done += ifi_canfd_handle_lec_err(ndev, errctr);
+ work_done += ifi_canfd_handle_lec_err(ndev);
/* Handle normal messages on RX */
if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
@@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
struct net_device_stats *stats = &ndev->stats;
const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
+ IFI_CANFD_INTERRUPT_ERROR_COUNTER |
+ IFI_CANFD_INTERRUPT_ERROR_STATE_CHG |
IFI_CANFD_INTERRUPT_ERROR_WARNING |
- IFI_CANFD_INTERRUPT_ERROR_COUNTER;
+ IFI_CANFD_INTERRUPT_ERROR_BUSOFF;
const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
- const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ |
- IFI_CANFD_INTERRUPT_ERROR_WARNING));
+ const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ);
u32 isr;
isr = readl(priv->base + IFI_CANFD_INTERRUPT);
@@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *addr;
int irq, ret;
- u32 id;
+ u32 id, rev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(dev, res);
@@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
return -EINVAL;
}
+ rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK;
+ if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) {
+ dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n",
+ rev, IFI_CANFD_VER_REV_MIN_SUPPORTED);
+ return -EINVAL;
+ }
+
ndev = alloc_candev(sizeof(*priv), 1);
if (!ndev)
return -ENOMEM;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2594f7779c6f14..b397a33f3d32b5 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -26,6 +26,7 @@
#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#include <linux/can/dev.h>
+#include <linux/pinctrl/consumer.h>
/* napi related */
#define M_CAN_NAPI_WEIGHT 64
@@ -253,7 +254,7 @@ enum m_can_mram_cfg {
/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
#define RXFC_FWM_SHIFT 24
-#define RXFC_FWM_MASK (0x7f < RXFC_FWM_SHIFT)
+#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT)
#define RXFC_FS_SHIFT 16
#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT)
@@ -1700,6 +1701,8 @@ static __maybe_unused int m_can_suspend(struct device *dev)
m_can_clk_stop(priv);
}
+ pinctrl_pm_select_sleep_state(dev);
+
priv->can.state = CAN_STATE_SLEEPING;
return 0;
@@ -1710,6 +1713,8 @@ static __maybe_unused int m_can_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct m_can_priv *priv = netdev_priv(ndev);
+ pinctrl_pm_select_default_state(dev);
+
m_can_init_ram(priv);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 55513411a82e68..ed8561d4a90f4b 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -262,7 +262,6 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
spin_lock_irqsave(&priv->echo_lock, flags);
can_get_echo_skb(priv->ndev, msg->client);
- spin_unlock_irqrestore(&priv->echo_lock, flags);
/* count bytes of the echo instead of skb */
stats->tx_bytes += cf_len;
@@ -271,6 +270,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
/* restart tx queue (a slot is free) */
netif_wake_queue(priv->ndev);
+ spin_unlock_irqrestore(&priv->echo_lock, flags);
return 0;
}
@@ -333,7 +333,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
/* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
if (pucan_status_is_rx_barrier(msg)) {
- unsigned long flags;
if (priv->enable_tx_path) {
int err = priv->enable_tx_path(priv);
@@ -342,16 +341,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
return err;
}
- /* restart network queue only if echo skb array is free */
- spin_lock_irqsave(&priv->echo_lock, flags);
-
- if (!priv->can.echo_skb[priv->echo_idx]) {
- spin_unlock_irqrestore(&priv->echo_lock, flags);
-
- netif_wake_queue(ndev);
- } else {
- spin_unlock_irqrestore(&priv->echo_lock, flags);
- }
+ /* start network queue (echo_skb array is empty) */
+ netif_start_queue(ndev);
return 0;
}
@@ -726,11 +717,6 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
*/
should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);
- spin_unlock_irqrestore(&priv->echo_lock, flags);
-
- /* write the skb on the interface */
- priv->write_tx_msg(priv, msg);
-
/* stop network tx queue if not enough room to save one more msg too */
if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
should_stop_tx_queue |= (room_left <
@@ -742,6 +728,11 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
if (should_stop_tx_queue)
netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&priv->echo_lock, flags);
+
+ /* write the skb on the interface */
+ priv->write_tx_msg(priv, msg);
+
return NETDEV_TX_OK;
}
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 788c3464a3b0e9..3c51a884db87bc 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -349,8 +349,12 @@ static irqreturn_t pciefd_irq_handler(int irq, void *arg)
priv->tx_pages_free++;
spin_unlock_irqrestore(&priv->tx_lock, flags);
- /* wake producer up */
- netif_wake_queue(priv->ucan.ndev);
+ /* wake producer up (only if enough room in echo_skb array) */
+ spin_lock_irqsave(&priv->ucan.echo_lock, flags);
+ if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx])
+ netif_wake_queue(priv->ucan.ndev);
+
+ spin_unlock_irqrestore(&priv->ucan.echo_lock, flags);
}
/* re-enable Rx DMA transfer for this CAN */
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index d040aeb4517266..15c2a831edf192 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,7 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
-obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o
+obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
+ifdef CONFIG_NET_DSA_LOOP
+obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
+endif
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index db830a1141d997..63e02a54d53792 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -814,8 +814,8 @@ void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
unsigned int i;
for (i = 0; i < mib_size; i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- mibs[i].name, ETH_GSTRING_LEN);
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ mibs[i].name, ETH_GSTRING_LEN);
}
EXPORT_SYMBOL(b53_get_strings);
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 29c3075bfb052f..fdc673484addcf 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -3,7 +3,7 @@
#
config NET_VENDOR_8390
- bool "National Semi-conductor 8390 devices"
+ bool "National Semiconductor 8390 devices"
default y
depends on NET_VENDOR_NATSEMI
---help---
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 0b49f1aeebd3dd..fc7383106946ca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -36,6 +36,8 @@
#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
#define AQ_CFG_RX_FRAME_MAX (4U * 1024U)
+#define AQ_CFG_TX_CLEAN_BUDGET 256U
+
/* LRO */
#define AQ_CFG_IS_LRO_DEF 1U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ebbaf63eaf4751..c96a92118b8b85 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -247,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->hw_features |= aq_hw_caps->hw_features;
self->ndev->features = aq_hw_caps->hw_features;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
+ self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
@@ -937,3 +939,23 @@ err_exit:
out:
return err;
}
+
+void aq_nic_shutdown(struct aq_nic_s *self)
+{
+ int err = 0;
+
+ if (!self->ndev)
+ return;
+
+ rtnl_lock();
+
+ netif_device_detach(self->ndev);
+
+ err = aq_nic_stop(self);
+ if (err < 0)
+ goto err_exit;
+ aq_nic_deinit(self);
+
+err_exit:
+ rtnl_unlock();
+}
\ No newline at end of file
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index d16b0f1a95aa48..219b550d16650b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -118,5 +118,6 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
u32 aq_nic_get_fw_version(struct aq_nic_s *self);
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
+void aq_nic_shutdown(struct aq_nic_s *self);
#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 87c4308b52a7cc..ecc6306f940f5d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -323,6 +323,20 @@ static void aq_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static void aq_pci_shutdown(struct pci_dev *pdev)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+ aq_nic_shutdown(self);
+
+ pci_disable_device(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
{
struct aq_nic_s *self = pci_get_drvdata(pdev);
@@ -345,6 +359,7 @@ static struct pci_driver aq_pci_ops = {
.remove = aq_pci_remove,
.suspend = aq_pci_suspend,
.resume = aq_pci_resume,
+ .shutdown = aq_pci_shutdown,
};
module_pci_driver(aq_pci_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0be6a11370bb3e..b5f1f62e8e2537 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -136,11 +136,12 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
netif_stop_subqueue(ndev, ring->idx);
}
-void aq_ring_tx_clean(struct aq_ring_s *self)
+bool aq_ring_tx_clean(struct aq_ring_s *self)
{
struct device *dev = aq_nic_get_dev(self->aq_nic);
+ unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET;
- for (; self->sw_head != self->hw_head;
+ for (; self->sw_head != self->hw_head && budget--;
self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
@@ -167,6 +168,8 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
buff->pa = 0U;
buff->eop_index = 0xffffU;
}
+
+ return !!budget;
}
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 965fae0fb6e0dd..ac1329f4051d7f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -153,7 +153,7 @@ void aq_ring_free(struct aq_ring_s *self);
void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
-void aq_ring_tx_clean(struct aq_ring_s *self);
+bool aq_ring_tx_clean(struct aq_ring_s *self);
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
int *work_done,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f890b8a5a8623e..d335c334fa561e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -35,12 +35,12 @@ struct aq_vec_s {
static int aq_vec_poll(struct napi_struct *napi, int budget)
{
struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
+ unsigned int sw_tail_old = 0U;
struct aq_ring_s *ring = NULL;
+ bool was_tx_cleaned = true;
+ unsigned int i = 0U;
int work_done = 0;
int err = 0;
- unsigned int i = 0U;
- unsigned int sw_tail_old = 0U;
- bool was_tx_cleaned = false;
if (!self) {
err = -EINVAL;
@@ -57,9 +57,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
if (ring[AQ_VEC_TX_ID].sw_head !=
ring[AQ_VEC_TX_ID].hw_head) {
- aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
+ was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
- was_tx_cleaned = true;
}
err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
@@ -90,7 +89,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
}
}
- if (was_tx_cleaned)
+ if (!was_tx_cleaned)
work_done = budget;
if (work_done < budget) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 967f0fd07fcf2d..d3b847ec7465cc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -21,6 +21,10 @@
#define HW_ATL_UCP_0X370_REG 0x0370U
+#define HW_ATL_MIF_CMD 0x0200U
+#define HW_ATL_MIF_ADDR 0x0208U
+#define HW_ATL_MIF_VAL 0x020CU
+
#define HW_ATL_FW_SM_RAM 0x2U
#define HW_ATL_MPI_FW_VERSION 0x18
#define HW_ATL_MPI_CONTROL_ADR 0x0368U
@@ -79,16 +83,15 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
{
+ u32 gsr, val;
int k = 0;
- u32 gsr;
aq_hw_write_reg(self, 0x404, 0x40e1);
AQ_HW_SLEEP(50);
/* Cleanup SPI */
- aq_hw_write_reg(self, 0x534, 0xA0);
- aq_hw_write_reg(self, 0x100, 0x9F);
- aq_hw_write_reg(self, 0x100, 0x809F);
+ val = aq_hw_read_reg(self, 0x53C);
+ aq_hw_write_reg(self, 0x53C, val | 0x10);
gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
@@ -97,7 +100,14 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
aq_hw_write_reg(self, 0x404, 0x80e0);
aq_hw_write_reg(self, 0x32a8, 0x0);
aq_hw_write_reg(self, 0x520, 0x1);
+
+ /* Reset SPI again because of possible interrupted SPI burst */
+ val = aq_hw_read_reg(self, 0x53C);
+ aq_hw_write_reg(self, 0x53C, val | 0x10);
AQ_HW_SLEEP(10);
+ /* Clear SPI reset state */
+ aq_hw_write_reg(self, 0x53C, val & ~0x10);
+
aq_hw_write_reg(self, 0x404, 0x180e0);
for (k = 0; k < 1000; k++) {
@@ -141,13 +151,15 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
aq_pr_err("FW kickstart failed\n");
return -EIO;
}
+ /* Old FW requires fixed delay after init */
+ AQ_HW_SLEEP(15);
return 0;
}
static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
{
- u32 gsr, rbl_status;
+ u32 gsr, val, rbl_status;
int k;
aq_hw_write_reg(self, 0x404, 0x40e1);
@@ -157,6 +169,10 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
/* Alter RBL status */
aq_hw_write_reg(self, 0x388, 0xDEAD);
+ /* Cleanup SPI */
+ val = aq_hw_read_reg(self, 0x53C);
+ aq_hw_write_reg(self, 0x53C, val | 0x10);
+
/* Global software reset*/
hw_atl_rx_rx_reg_res_dis_set(self, 0U);
hw_atl_tx_tx_reg_res_dis_set(self, 0U);
@@ -204,6 +220,8 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
aq_pr_err("FW kickstart failed\n");
return -EIO;
}
+ /* Old FW requires fixed delay after init */
+ AQ_HW_SLEEP(15);
return 0;
}
@@ -255,18 +273,22 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
}
}
- aq_hw_write_reg(self, 0x00000208U, a);
-
- for (++cnt; --cnt;) {
- u32 i = 0U;
+ aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);
- aq_hw_write_reg(self, 0x00000200U, 0x00008000U);
+ for (++cnt; --cnt && !err;) {
+ aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
- for (i = 1024U;
- (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) {
- }
+ if (IS_CHIP_FEATURE(REVISION_B1))
+ AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self,
+ HW_ATL_MIF_ADDR),
+ 1, 1000U);
+ else
+ AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self,
+ HW_ATL_MIF_CMD)),
+ 1, 1000U);
- *(p++) = aq_hw_read_reg(self, 0x0000020CU);
+ *(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
+ a += 4;
}
hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
@@ -662,14 +684,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
u32 val = hw_atl_reg_glb_mif_id_get(self);
u32 mif_rev = val & 0xFFU;
- if ((3U & mif_rev) == 1U) {
- chip_features |=
- HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
+ if ((0xFU & mif_rev) == 1U) {
+ chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
HAL_ATLANTIC_UTILS_CHIP_MIPS;
- } else if ((3U & mif_rev) == 2U) {
- chip_features |=
- HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
+ } else if ((0xFU & mif_rev) == 2U) {
+ chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
+ HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
+ HAL_ATLANTIC_UTILS_CHIP_MIPS |
+ HAL_ATLANTIC_UTILS_CHIP_TPO2 |
+ HAL_ATLANTIC_UTILS_CHIP_RPF2;
+ } else if ((0xFU & mif_rev) == 0xAU) {
+ chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
HAL_ATLANTIC_UTILS_CHIP_MIPS |
HAL_ATLANTIC_UTILS_CHIP_TPO2 |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 2c690947910a39..cd8f18f39c611f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -161,6 +161,7 @@ struct __packed hw_aq_atl_utils_mbox {
#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
+#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U
#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
self->chip_features)
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 5265b937677bca..a445de6837a6c8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -13,7 +13,7 @@
#define NIC_MAJOR_DRIVER_VERSION 2
#define NIC_MINOR_DRIVER_VERSION 0
#define NIC_BUILD_DRIVER_VERSION 2
-#define NIC_REVISION_DRIVER_VERSION 0
+#define NIC_REVISION_DRIVER_VERSION 1
#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 16f9bee992fedf..0f65768026072a 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
/* Optional regulator for PHY */
priv->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(priv->regulator)) {
- if (PTR_ERR(priv->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto out_clk_disable;
+ }
dev_err(dev, "no regulator found\n");
priv->regulator = NULL;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index f15a8fc6dfc974..3fc549b88c43b0 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
- unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
struct net_device *ndev = priv->netdev;
+ unsigned int txbds_processed = 0;
struct bcm_sysport_cb *cb;
+ unsigned int txbds_ready;
+ unsigned int c_index;
u32 hw_ind;
/* Clear status before servicing to reduce spurious interrupts */
@@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
- ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
-
- last_c_index = ring->c_index;
- num_tx_cbs = ring->size;
-
- c_index &= (num_tx_cbs - 1);
-
- if (c_index >= last_c_index)
- last_tx_cn = c_index - last_c_index;
- else
- last_tx_cn = num_tx_cbs - last_c_index + c_index;
+ txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
netif_dbg(priv, tx_done, ndev,
- "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
- ring->index, c_index, last_tx_cn, last_c_index);
+ "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+ ring->index, ring->c_index, c_index, txbds_ready);
- while (last_tx_cn-- > 0) {
- cb = ring->cbs + last_c_index;
+ while (txbds_processed < txbds_ready) {
+ cb = &ring->cbs[ring->clean_index];
bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
ring->desc_count++;
- last_c_index++;
- last_c_index &= (num_tx_cbs - 1);
+ txbds_processed++;
+
+ if (likely(ring->clean_index < ring->size - 1))
+ ring->clean_index++;
+ else
+ ring->clean_index = 0;
}
u64_stats_update_begin(&priv->syncp);
@@ -1394,6 +1390,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
ring->index = index;
ring->size = size;
+ ring->clean_index = 0;
ring->alloc_size = ring->size;
ring->desc_cpu = p;
ring->desc_count = ring->size;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index f5a984c1c98653..19c91c76e32763 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring {
unsigned int desc_count; /* Number of descriptors */
unsigned int curr_desc; /* Current descriptor */
unsigned int c_index; /* Last consumer index */
- unsigned int p_index; /* Current producer index */
+ unsigned int clean_index; /* Current clean index */
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 74fc9af4aadb43..b8388e93520a1a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13913,7 +13913,7 @@ static void bnx2x_register_phc(struct bnx2x *bp)
bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
if (IS_ERR(bp->ptp_clock)) {
bp->ptp_clock = NULL;
- BNX2X_ERR("PTP clock registeration failed\n");
+ BNX2X_ERR("PTP clock registration failed\n");
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1500243b988650..c7e5e6f09647d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
(skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
- u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
+ u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
@@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
(skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
- u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
+ u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
@@ -3847,6 +3847,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_tpa_cfg_input req = {0};
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ return 0;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
if (tpa_flags) {
@@ -4558,18 +4561,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
return rc;
}
-static int
-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int vnics)
+static void
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
+ int tx_rings, int rx_rings, int ring_grps,
+ int cp_rings, int vnics)
{
- struct hwrm_func_cfg_input req = {0};
u32 enables = 0;
- int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(0xffff);
+ bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
+ req->fid = cpu_to_le16(0xffff);
enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- req.num_tx_rings = cpu_to_le16(tx_rings);
+ req->num_tx_rings = cpu_to_le16(tx_rings);
if (bp->flags & BNXT_FLAG_NEW_RM) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
@@ -4578,16 +4580,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
- req.num_rx_rings = cpu_to_le16(rx_rings);
- req.num_hw_ring_grps = cpu_to_le16(ring_grps);
- req.num_cmpl_rings = cpu_to_le16(cp_rings);
- req.num_stat_ctxs = req.num_cmpl_rings;
- req.num_vnics = cpu_to_le16(vnics);
+ req->num_rx_rings = cpu_to_le16(rx_rings);
+ req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ req->num_stat_ctxs = req->num_cmpl_rings;
+ req->num_vnics = cpu_to_le16(vnics);
}
- if (!enables)
+ req->enables = cpu_to_le32(enables);
+}
+
+static void
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
+ struct hwrm_func_vf_cfg_input *req, int tx_rings,
+ int rx_rings, int ring_grps, int cp_rings,
+ int vnics)
+{
+ u32 enables = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
+ enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+ req->num_tx_rings = cpu_to_le16(tx_rings);
+ req->num_rx_rings = cpu_to_le16(rx_rings);
+ req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ req->num_stat_ctxs = req->num_cmpl_rings;
+ req->num_vnics = cpu_to_le16(vnics);
+
+ req->enables = cpu_to_le32(enables);
+}
+
+static int
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int vnics)
+{
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+ cp_rings, vnics);
+ if (!req.enables)
return 0;
- req.enables = cpu_to_le32(enables);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
@@ -4604,7 +4643,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
int ring_grps, int cp_rings, int vnics)
{
struct hwrm_func_vf_cfg_input req = {0};
- u32 enables = 0;
int rc;
if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
@@ -4612,22 +4650,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return 0;
}
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
- enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
-
- req.num_tx_rings = cpu_to_le16(tx_rings);
- req.num_rx_rings = cpu_to_le16(rx_rings);
- req.num_hw_ring_grps = cpu_to_le16(ring_grps);
- req.num_cmpl_rings = cpu_to_le16(cp_rings);
- req.num_stat_ctxs = req.num_cmpl_rings;
- req.num_vnics = cpu_to_le16(vnics);
-
- req.enables = cpu_to_le32(enables);
+ __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+ cp_rings, vnics);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
@@ -4743,39 +4767,25 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
}
static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings)
+ int ring_grps, int cp_rings, int vnics)
{
struct hwrm_func_vf_cfg_input req = {0};
- u32 flags, enables;
+ u32 flags;
int rc;
if (!(bp->flags & BNXT_FLAG_NEW_RM))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+ cp_rings, vnics);
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
- enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
req.flags = cpu_to_le32(flags);
- req.enables = cpu_to_le32(enables);
- req.num_tx_rings = cpu_to_le16(tx_rings);
- req.num_rx_rings = cpu_to_le16(rx_rings);
- req.num_cmpl_rings = cpu_to_le16(cp_rings);
- req.num_hw_ring_grps = cpu_to_le16(ring_grps);
- req.num_stat_ctxs = cpu_to_le16(cp_rings);
- req.num_vnics = cpu_to_le16(1);
- if (bp->flags & BNXT_FLAG_RFS)
- req.num_vnics = cpu_to_le16(rx_rings + 1);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
@@ -4783,38 +4793,23 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings)
+ int ring_grps, int cp_rings, int vnics)
{
struct hwrm_func_cfg_input req = {0};
- u32 flags, enables;
+ u32 flags;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(0xffff);
+ __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+ cp_rings, vnics);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
- enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
- req.num_tx_rings = cpu_to_le16(tx_rings);
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (bp->flags & BNXT_FLAG_NEW_RM)
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
- enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
- FUNC_CFG_REQ_ENABLES_NUM_VNICS;
- req.num_rx_rings = cpu_to_le16(rx_rings);
- req.num_cmpl_rings = cpu_to_le16(cp_rings);
- req.num_hw_ring_grps = cpu_to_le16(ring_grps);
- req.num_stat_ctxs = cpu_to_le16(cp_rings);
- req.num_vnics = cpu_to_le16(1);
- if (bp->flags & BNXT_FLAG_RFS)
- req.num_vnics = cpu_to_le16(rx_rings + 1);
- }
+
req.flags = cpu_to_le32(flags);
- req.enables = cpu_to_le32(enables);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
@@ -4822,17 +4817,17 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings)
+ int ring_grps, int cp_rings, int vnics)
{
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_PF(bp))
return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
- ring_grps, cp_rings);
+ ring_grps, cp_rings, vnics);
return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings);
+ cp_rings, vnics);
}
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -5865,7 +5860,6 @@ static int bnxt_init_msix(struct bnxt *bp)
if (rc)
goto msix_setup_exit;
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
bp->cp_nr_rings = (min == 1) ?
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
@@ -5897,7 +5891,6 @@ static int bnxt_init_inta(struct bnxt *bp)
bp->rx_nr_rings = 1;
bp->tx_nr_rings = 1;
bp->cp_nr_rings = 1;
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
bp->flags |= BNXT_FLAG_SHARED_RINGS;
bp->irq_tbl[0].vector = bp->pdev->irq;
return 0;
@@ -7531,7 +7524,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int max_rx, max_tx, tx_sets = 1;
int tx_rings_needed;
int rx_rings = rx;
- int cp, rc;
+ int cp, vnics, rc;
if (tcs)
tx_sets = tcs;
@@ -7547,10 +7540,15 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (max_tx < tx_rings_needed)
return -ENOMEM;
+ vnics = 1;
+ if (bp->flags & BNXT_FLAG_RFS)
+ vnics += rx_rings;
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
- return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
+ return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
+ vnics);
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -8437,13 +8435,20 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
return 0;
bnxt_hwrm_func_qcaps(bp);
- __bnxt_close_nic(bp, true, false);
+
+ if (netif_running(bp->dev))
+ __bnxt_close_nic(bp, true, false);
+
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
- if (rc)
- dev_close(bp->dev);
- else
- rc = bnxt_open_nic(bp, true, false);
+
+ if (netif_running(bp->dev)) {
+ if (rc)
+ dev_close(bp->dev);
+ else
+ rc = bnxt_open_nic(bp, true, false);
+ }
+
return rc;
}
@@ -8664,6 +8669,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ /* No TC has been set yet and rings may have been trimmed due to
+ * limited MSIX, so we re-initialize the TX rings per TC.
+ */
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+
bnxt_get_wol_settings(bp);
if (bp->flags & BNXT_FLAG_WOL_CAP)
device_set_wakeup_enable(&pdev->dev, bp->wol);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 1989c470172cba..5e3d62189cab8e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -189,6 +189,7 @@ struct rx_cmp_ext {
#define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
#define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4)
__le32 rx_cmp_meta_data;
+ #define RX_CMP_FLAGS2_METADATA_TCI_MASK 0xffff
#define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff
#define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000
#define RX_CMP_FLAGS2_METADATA_TPID_SFT 16
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index fbe6e208e17b9a..65c2cee357669a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -349,6 +349,9 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
if (rc)
netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
__func__, flow_handle, rc);
+
+ if (rc)
+ rc = -EIO;
return rc;
}
@@ -484,13 +487,15 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
req.action_flags = cpu_to_le16(action_flags);
mutex_lock(&bp->hwrm_cmd_lock);
-
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
*flow_handle = resp->flow_handle;
-
mutex_unlock(&bp->hwrm_cmd_lock);
+ if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
+ rc = -ENOSPC;
+ else if (rc)
+ rc = -EIO;
return rc;
}
@@ -561,6 +566,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
mutex_unlock(&bp->hwrm_cmd_lock);
+ if (rc)
+ rc = -EIO;
return rc;
}
@@ -576,6 +583,9 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+
+ if (rc)
+ rc = -EIO;
return rc;
}
@@ -624,6 +634,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
mutex_unlock(&bp->hwrm_cmd_lock);
+ if (rc)
+ rc = -EIO;
return rc;
}
@@ -639,6 +651,9 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+
+ if (rc)
+ rc = -EIO;
return rc;
}
@@ -1269,11 +1284,8 @@ static int bnxt_tc_del_flow(struct bnxt *bp,
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
tc_info->flow_ht_params);
- if (!flow_node) {
- netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
- tc_flow_cmd->cookie);
+ if (!flow_node)
return -EINVAL;
- }
return __bnxt_tc_del_flow(bp, flow_node);
}
@@ -1290,11 +1302,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
tc_info->flow_ht_params);
- if (!flow_node) {
- netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
- tc_flow_cmd->cookie);
+ if (!flow_node)
return -1;
- }
flow = &flow_node->flow;
curr_stats = &flow->stats;
@@ -1344,8 +1353,10 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
} else {
netdev_info(bp->dev, "error rc=%d", rc);
}
-
mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc)
+ rc = -EIO;
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index c1841db1b500fa..f2593978ae75fb 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
- usleep_range(10, 20);
+ udelay(10);
timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 7b452e85de2ad1..61022b5f6743e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4970,7 +4970,6 @@ static void cxgb4_mgmt_setup(struct net_device *dev)
/* Initialize the device structure. */
dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
- dev->needs_free_netdev = true;
}
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
@@ -5181,6 +5180,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->name = pci_name(pdev);
adapter->mbox = func;
adapter->pf = func;
+ adapter->params.chip = chip;
+ adapter->adap_idx = adap_idx;
adapter->msg_enable = DFLT_MSG_ENABLE;
adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
(sizeof(struct mbox_cmd) *
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 5eb999af2c4000..bd3f6e4d134138 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -540,6 +540,7 @@ static int gmac_setup_txqs(struct net_device *netdev)
if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
dev_warn(geth->dev, "TX queue base it not aligned\n");
+ kfree(skb_tab);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 7caa8da484217e..e4ec32a9ca1526 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2008,7 +2008,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
}
if (unlikely(err < 0)) {
- percpu_stats->tx_errors++;
percpu_stats->tx_fifo_errors++;
return err;
}
@@ -2278,7 +2277,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
vaddr = phys_to_virt(addr);
prefetch(vaddr + qm_fd_get_offset(fd));
- fd_format = qm_fd_get_format(fd);
/* The only FD types that we may receive are contig and S/G */
WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
@@ -2311,8 +2309,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
skb_len = skb->len;
- if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
+ percpu_stats->rx_dropped++;
return qman_cb_dqrr_consume;
+ }
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += skb_len;
@@ -2860,7 +2860,7 @@ static int dpaa_remove(struct platform_device *pdev)
struct device *dev;
int err;
- dev = &pdev->dev;
+ dev = pdev->dev.parent;
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 7a7f3a42b2aa1e..d4604bc8eb5b04 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3600,6 +3600,8 @@ fec_drv_remove(struct platform_device *pdev)
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index ea43b497414986..7af31ddd093f85 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
set_bucket(dtsec->regs, bucket, true);
/* Create element to be added to the driver hash table */
- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
if (!hash_entry)
return -ENOMEM;
hash_entry->addr = addr;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 86944bc3b273fd..74bd260ca02a88 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -666,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
static int hns_gmac_get_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+ if (stringset == ETH_SS_STATS)
return ARRAY_SIZE(g_gmac_stats_string);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index b62816c1574eb8..93e71e27401b4d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
int hns_ppe_get_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+ if (stringset == ETH_SS_STATS)
return ETH_PPE_STATIC_NUM;
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6f3570cfb50160..e2e28532e4dc2d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -876,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
*/
int hns_rcb_get_ring_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+ if (stringset == ETH_SS_STATS)
return HNS_RING_STATIC_REG_NUM;
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 7ea7f8a4aa2a94..2e14a3ae1d8be0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -993,8 +993,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
cnt--;
return cnt;
- } else {
+ } else if (stringset == ETH_SS_STATS) {
return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+ } else {
+ return -EOPNOTSUPP;
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index afb7ebe20b2438..824fd44e25f0d6 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -400,6 +400,10 @@
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_MDAC 0x00000200 /* MDIO Access Complete */
+#define E1000_ICR_SRPD 0x00010000 /* Small Receive Packet Detected */
+#define E1000_ICR_ACK 0x00020000 /* Receive ACK Frame Detected */
+#define E1000_ICR_MNG 0x00040000 /* Manageability Event Detected */
#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
/* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_INT_ASSERTED 0x80000000
@@ -407,7 +411,7 @@
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
-#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
+#define E1000_ICR_OTHER 0x01000000 /* Other Interrupt */
/* PBA ECC Register */
#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
@@ -431,12 +435,27 @@
E1000_IMS_RXSEQ | \
E1000_IMS_LSC)
+/* These are all of the events related to the OTHER interrupt.
+ */
+#define IMS_OTHER_MASK ( \
+ E1000_IMS_LSC | \
+ E1000_IMS_RXO | \
+ E1000_IMS_MDAC | \
+ E1000_IMS_SRPD | \
+ E1000_IMS_ACK | \
+ E1000_IMS_MNG)
+
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+#define E1000_IMS_RXO E1000_ICR_RXO /* Receiver Overrun */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO Access Complete */
+#define E1000_IMS_SRPD E1000_ICR_SRPD /* Small Receive Packet */
+#define E1000_IMS_ACK E1000_ICR_ACK /* Receive ACK Frame Detected */
+#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability Event */
#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 31277d3bb7dc12..1dddfb7b2de6c9 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,9 +1367,6 @@ out:
* Checks to see of the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
- *
- * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
- * up).
**/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
@@ -1385,7 +1382,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status)
- return 1;
+ return 0;
+ mac->get_link_status = false;
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
@@ -1393,12 +1391,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
*/
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
if (ret_val)
- return ret_val;
+ goto out;
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_k1_gig_workaround_hv(hw, link);
if (ret_val)
- return ret_val;
+ goto out;
}
/* When connected at 10Mbps half-duplex, some parts are excessively
@@ -1431,7 +1429,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- return ret_val;
+ goto out;
if (hw->mac.type == e1000_pch2lan)
emi_addr = I82579_RX_CONFIG;
@@ -1453,7 +1451,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
hw->phy.ops.release(hw);
if (ret_val)
- return ret_val;
+ goto out;
if (hw->mac.type >= e1000_pch_spt) {
u16 data;
@@ -1462,14 +1460,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (speed == SPEED_1000) {
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- return ret_val;
+ goto out;
ret_val = e1e_rphy_locked(hw,
PHY_REG(776, 20),
&data);
if (ret_val) {
hw->phy.ops.release(hw);
- return ret_val;
+ goto out;
}
ptr_gap = (data & (0x3FF << 2)) >> 2;
@@ -1483,18 +1481,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
}
hw->phy.ops.release(hw);
if (ret_val)
- return ret_val;
+ goto out;
} else {
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- return ret_val;
+ goto out;
ret_val = e1e_wphy_locked(hw,
PHY_REG(776, 20),
0xC023);
hw->phy.ops.release(hw);
if (ret_val)
- return ret_val;
+ goto out;
}
}
@@ -1521,7 +1519,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
ret_val = e1000_k1_workaround_lpt_lp(hw, link);
if (ret_val)
- return ret_val;
+ goto out;
}
if (hw->mac.type >= e1000_pch_lpt) {
/* Set platform power management values for
@@ -1529,7 +1527,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
*/
ret_val = e1000_platform_pm_pch_lpt(hw, link);
if (ret_val)
- return ret_val;
+ goto out;
}
/* Clear link partner's EEE ability */
@@ -1552,9 +1550,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
}
if (!link)
- return 0; /* No link detected */
-
- mac->get_link_status = false;
+ goto out;
switch (hw->mac.type) {
case e1000_pch2lan:
@@ -1616,12 +1612,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
- if (ret_val) {
+ if (ret_val)
e_dbg("Error configuring flow control\n");
- return ret_val;
- }
- return 1;
+ return ret_val;
+
+out:
+ mac->get_link_status = true;
+ return ret_val;
}
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index f457c5703d0c45..5bdc3a2d4fd70a 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -410,9 +410,6 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
* Checks to see of the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
- *
- * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
- * up).
**/
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
@@ -426,20 +423,16 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status)
- return 1;
+ return 0;
+ mac->get_link_status = false;
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
*/
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
- if (ret_val)
- return ret_val;
-
- if (!link)
- return 0; /* No link detected */
-
- mac->get_link_status = false;
+ if (ret_val || !link)
+ goto out;
/* Check if there was DownShift, must be checked
* immediately after link-up
@@ -464,12 +457,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
- if (ret_val) {
+ if (ret_val)
e_dbg("Error configuring flow control\n");
- return ret_val;
- }
- return 1;
+ return ret_val;
+
+out:
+ mac->get_link_status = true;
+ return ret_val;
}
/**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 1298b69f990b40..dc853b0863aff1 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1914,30 +1914,20 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 icr;
- bool enable = true;
-
- icr = er32(ICR);
- if (icr & E1000_ICR_RXO) {
- ew32(ICR, E1000_ICR_RXO);
- enable = false;
- /* napi poll will re-enable Other, make sure it runs */
- if (napi_schedule_prep(&adapter->napi)) {
- adapter->total_rx_bytes = 0;
- adapter->total_rx_packets = 0;
- __napi_schedule(&adapter->napi);
- }
- }
+ u32 icr = er32(ICR);
+
+ if (icr & adapter->eiac_mask)
+ ew32(ICS, (icr & adapter->eiac_mask));
+
if (icr & E1000_ICR_LSC) {
- ew32(ICR, E1000_ICR_LSC);
hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (enable && !test_bit(__E1000_DOWN, &adapter->state))
- ew32(IMS, E1000_IMS_OTHER);
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);
return IRQ_HANDLED;
}
@@ -2040,7 +2030,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
hw->hw_addr + E1000_EITR_82574(vector));
else
writel(1, hw->hw_addr + E1000_EITR_82574(vector));
- adapter->eiac_mask |= E1000_IMS_OTHER;
/* Cause Tx interrupts on every write back */
ivar |= BIT(31);
@@ -2265,7 +2254,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
- ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC);
+ ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER |
+ IMS_OTHER_MASK);
} else if (hw->mac.type >= e1000_pch_lpt) {
ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
@@ -2333,8 +2323,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
{
struct pci_dev *pdev = adapter->pdev;
- ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
- GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
@@ -2707,8 +2697,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
if (adapter->msix_entries)
- ew32(IMS, adapter->rx_ring->ims_val |
- E1000_IMS_OTHER);
+ ew32(IMS, adapter->rx_ring->ims_val);
else
e1000_irq_enable(adapter);
}
@@ -5101,7 +5090,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
case e1000_media_type_copper:
if (hw->mac.get_link_status) {
ret_val = hw->mac.ops.check_for_link(hw);
- link_active = ret_val > 0;
+ link_active = !hw->mac.get_link_status;
} else {
link_active = true;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index b698fb481b2ecb..996dc099cd584c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -443,6 +443,17 @@ int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
}
EXPORT_SYMBOL(mlxsw_afa_block_jump);
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
+{
+ if (block->finished)
+ return -EINVAL;
+ mlxsw_afa_set_goto_set(block->cur_set,
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
+ block->finished = true;
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_terminate);
+
static struct mlxsw_afa_fwd_entry *
mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 43132293475ce9..b91f2b0829b04a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -65,6 +65,7 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c7e941aecc2a90..bf400c75fcc8bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -655,13 +655,17 @@ static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
}
static struct mlxsw_sp_span_inspected_port *
-mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry)
+mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ struct mlxsw_sp_port *port,
+ bool bind)
{
struct mlxsw_sp_span_inspected_port *p;
list_for_each_entry(p, &span_entry->bound_ports_list, list)
- if (port->local_port == p->local_port)
+ if (type == p->type &&
+ port->local_port == p->local_port &&
+ bind == p->bound)
return p;
return NULL;
}
@@ -691,8 +695,22 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
struct mlxsw_sp_span_inspected_port *inspected_port;
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int i;
int err;
+ /* A given (source port, direction) can only be bound to one analyzer,
+ * so if a binding is requested, check for conflicts.
+ */
+ if (bind)
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr =
+ &mlxsw_sp->span.entries[i];
+
+ if (mlxsw_sp_span_entry_bound_port_find(curr, type,
+ port, bind))
+ return -EEXIST;
+ }
+
/* if it is an egress SPAN, bind a shared buffer to it */
if (type == MLXSW_SP_SPAN_EGRESS) {
u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
@@ -720,6 +738,7 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
}
inspected_port->local_port = port->local_port;
inspected_port->type = type;
+ inspected_port->bound = bind;
list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
return 0;
@@ -746,7 +765,8 @@ mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
char sbib_pl[MLXSW_REG_SBIB_LEN];
- inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
+ inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
+ port, bind);
if (!inspected_port)
return;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4ec1ca3c96c8c5..92064db2ae4421 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -120,6 +120,9 @@ struct mlxsw_sp_span_inspected_port {
struct list_head list;
enum mlxsw_sp_span_type type;
u8 local_port;
+
+ /* Whether this is a directly bound mirror (port-to-port) or an ACL. */
+ bool bound;
};
struct mlxsw_sp_span_entry {
@@ -553,6 +556,7 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
u16 group_id);
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 0897a5435cc2e2..92d90ed7207e62 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -528,6 +528,11 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
return mlxsw_afa_block_jump(rulei->act_block, group_id);
}
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_terminate(rulei->act_block);
+}
+
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
return mlxsw_afa_block_append_drop(rulei->act_block);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 93728c694e6df9..0a9adc5962fb72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_SB_CM(10000, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 6ce00e28d4eac8..89dbf569dff50c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -65,7 +65,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_ok(a)) {
- err = mlxsw_sp_acl_rulei_act_continue(rulei);
+ err = mlxsw_sp_acl_rulei_act_terminate(rulei);
if (err)
return err;
} else if (is_tcf_gact_shot(a)) {
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index a10ef50e4f12c3..017fb232258979 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -1,16 +1,16 @@
#
-# National Semi-conductor device configuration
+# National Semiconductor device configuration
#
config NET_VENDOR_NATSEMI
- bool "National Semi-conductor devices"
+ bool "National Semiconductor devices"
default y
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about National Semi-conductor devices. If you say Y,
+ the questions about National Semiconductor devices. If you say Y,
you will be asked for your specific card in the following questions.
if NET_VENDOR_NATSEMI
diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile
index cc664977596e24..a759aa09ef5960 100644
--- a/drivers/net/ethernet/natsemi/Makefile
+++ b/drivers/net/ethernet/natsemi/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Makefile for the National Semi-conductor Sonic devices.
+# Makefile for the National Semiconductor Sonic devices.
#
obj-$(CONFIG_MACSONIC) += macsonic.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 6f546e869d8d69..00f41c145d4d01 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2480,7 +2480,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
if (rc)
return rc;
- /* Free Task CXT */
+ /* Free Task CXT ( Intentionally RoCE as task-id is shared between
+ * RoCE and iWARP )
+ */
+ proto = PROTOCOLID_ROCE;
rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
qed_cxt_get_proto_tid_count(p_hwfn, proto));
if (rc)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index ca4a81dc1ace68..d5d02be7294741 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1703,6 +1703,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
if (eth_type == ETH_P_IP) {
+ if (iph->protocol != IPPROTO_TCP) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected ip protocol on ll2 %x\n",
+ iph->protocol);
+ return -EINVAL;
+ }
+
cm_info->local_ip[0] = ntohl(iph->daddr);
cm_info->remote_ip[0] = ntohl(iph->saddr);
cm_info->ip_version = TCP_IPV4;
@@ -1711,6 +1718,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
*payload_len = ntohs(iph->tot_len) - ip_hlen;
} else if (eth_type == ETH_P_IPV6) {
ip6h = (struct ipv6hdr *)iph;
+
+ if (ip6h->nexthdr != IPPROTO_TCP) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected ip protocol on ll2 %x\n",
+ ip6h->nexthdr);
+ return -EINVAL;
+ }
+
for (i = 0; i < 4; i++) {
cm_info->local_ip[i] =
ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
@@ -1928,8 +1943,8 @@ qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
/* Missing lower byte is now available */
mpa_len = fpdu->fpdu_length | *mpa_data;
fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
- fpdu->mpa_frag_len = fpdu->fpdu_length;
/* one byte of hdr */
+ fpdu->mpa_frag_len = 1;
fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
DP_VERBOSE(p_hwfn,
QED_MSG_RDMA,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 5d040b873137d0..a411f9c702a16a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -379,6 +379,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
qed_rdma_free_reserved_lkey(p_hwfn);
+ qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
qed_rdma_resc_free(p_hwfn);
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 2db70eabddfec1..a01e7d6e5442f0 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -288,7 +288,7 @@ int __init qede_init(void)
}
/* Must register notifier before pci ops, since we might miss
- * interface rename after pci probe and netdev registeration.
+ * interface rename after pci probe and netdev registration.
*/
ret = register_netdevice_notifier(&qede_netdev_notifier);
if (ret) {
@@ -988,7 +988,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (rc)
goto err3;
- /* Prepare the lock prior to the registeration of the netdev,
+ /* Prepare the lock prior to the registration of the netdev,
* as once it's registered we might reach flows requiring it
* [it's even possible to reach a flow needing it directly
* from there, although it's unlikely].
@@ -2067,8 +2067,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
link_params.link_up = true;
edev->ops->common->set_link(edev->cdev, &link_params);
- qede_rdma_dev_event_open(edev);
-
edev->state = QEDE_STATE_OPEN;
DP_INFO(edev, "Ending successfully qede load\n");
@@ -2169,12 +2167,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
DP_NOTICE(edev, "Link is up\n");
netif_tx_start_all_queues(edev->ndev);
netif_carrier_on(edev->ndev);
+ qede_rdma_dev_event_open(edev);
}
} else {
if (netif_carrier_ok(edev->ndev)) {
DP_NOTICE(edev, "Link is down\n");
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
+ qede_rdma_dev_event_close(edev);
}
}
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 9b2280badaf776..02adb513f4756c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -485,7 +485,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
if (IS_ERR(ptp->clock)) {
rc = -EINVAL;
- DP_ERR(edev, "PTP clock registeration failed\n");
+ DP_ERR(edev, "PTP clock registration failed\n");
goto err2;
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 9cbb27263742bf..d5a32b7c7dc5a4 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1194,9 +1194,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
while (tx_q->tpd.consume_idx != hw_consume_idx) {
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
if (tpbuf->dma_addr) {
- dma_unmap_single(adpt->netdev->dev.parent,
- tpbuf->dma_addr, tpbuf->length,
- DMA_TO_DEVICE);
+ dma_unmap_page(adpt->netdev->dev.parent,
+ tpbuf->dma_addr, tpbuf->length,
+ DMA_TO_DEVICE);
tpbuf->dma_addr = 0;
}
@@ -1353,9 +1353,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
tpbuf->length = mapped_len;
- tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
- skb->data, tpbuf->length,
- DMA_TO_DEVICE);
+ tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+ virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ tpbuf->length,
+ DMA_TO_DEVICE);
ret = dma_mapping_error(adpt->netdev->dev.parent,
tpbuf->dma_addr);
if (ret)
@@ -1371,9 +1373,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
if (mapped_len < len) {
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
tpbuf->length = len - mapped_len;
- tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
- skb->data + mapped_len,
- tpbuf->length, DMA_TO_DEVICE);
+ tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+ virt_to_page(skb->data +
+ mapped_len),
+ offset_in_page(skb->data +
+ mapped_len),
+ tpbuf->length, DMA_TO_DEVICE);
ret = dma_mapping_error(adpt->netdev->dev.parent,
tpbuf->dma_addr);
if (ret)
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 012fb66eed8dd6..f0afb88d7bc2b0 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
pdata = netdev_priv(dev);
BUG_ON(!pdata);
BUG_ON(!pdata->ioaddr);
- WARN_ON(dev->phydev);
SMSC_TRACE(pdata, ifdown, "Stopping driver");
+ unregister_netdev(dev);
+
mdiobus_unregister(pdata->mii_bus);
mdiobus_free(pdata->mii_bus);
- unregister_netdev(dev);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smsc911x-memory");
if (!res)
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 111e7ca9df5600..f5c5984afefb98 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1295,7 +1295,7 @@ static int ave_open(struct net_device *ndev)
val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
writel(val, priv->base + AVE_IIRQC);
- val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX;
+ val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
ave_irq_restore(ndev, val);
napi_enable(&priv->napi_rx);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 63d3d6b215f309..a94f50442613e9 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
dev->ethtool_ops = &vnet_ethtool_ops;
dev->watchdog_timeo = VNET_TX_TIMEOUT;
- dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
+ dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |
NETIF_F_HW_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1b1b78fdc13849..b2b30c9df03770 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1014,7 +1014,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
mac_control |= BIT(15);
- else if (phy->speed == 10)
+ /* in band mode only works in 10Mbps RGMII mode */
+ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
mac_control |= BIT(18); /* In Band mode */
if (priv->rx_pause)
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 0db3bd1ea06f5a..32861036c3fcc9 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -173,6 +173,7 @@ struct rndis_device {
struct list_head req_list;
struct work_struct mcast_work;
+ u32 filter;
bool link_state; /* 0 - link up, 1 - link down */
@@ -211,7 +212,6 @@ void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
void rndis_set_subchannel(struct work_struct *w);
-bool rndis_filter_opened(const struct netvsc_device *nvdev);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0265d703eb0305..7472172823f3ed 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -90,6 +90,11 @@ static void free_netvsc_device(struct rcu_head *head)
= container_of(head, struct netvsc_device, rcu);
int i;
+ kfree(nvdev->extension);
+ vfree(nvdev->recv_buf);
+ vfree(nvdev->send_buf);
+ kfree(nvdev->send_section_map);
+
for (i = 0; i < VRSS_CHANNEL_MAX; i++)
vfree(nvdev->chan_table[i].mrc.slots);
@@ -211,12 +216,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
net_device->recv_buf_gpadl_handle = 0;
}
- if (net_device->recv_buf) {
- /* Free up the receive buffer */
- vfree(net_device->recv_buf);
- net_device->recv_buf = NULL;
- }
-
if (net_device->send_buf_gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
net_device->send_buf_gpadl_handle);
@@ -231,12 +230,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
}
net_device->send_buf_gpadl_handle = 0;
}
- if (net_device->send_buf) {
- /* Free up the send buffer */
- vfree(net_device->send_buf);
- net_device->send_buf = NULL;
- }
- kfree(net_device->send_section_map);
}
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
@@ -562,26 +555,29 @@ void netvsc_device_remove(struct hv_device *device)
= rtnl_dereference(net_device_ctx->nvdev);
int i;
- cancel_work_sync(&net_device->subchan_work);
-
netvsc_revoke_buf(device, net_device);
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
+ /* And disassociate NAPI context from device */
+ for (i = 0; i < net_device->num_chn; i++)
+ netif_napi_del(&net_device->chan_table[i].napi);
+
/*
* At this point, no one should be accessing net_device
* except in here
*/
netdev_dbg(ndev, "net device safe to remove\n");
+ /* older versions require that buffer be revoked before close */
+ if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4)
+ netvsc_teardown_gpadl(device, net_device);
+
/* Now, we can close the channel safely */
vmbus_close(device->channel);
- netvsc_teardown_gpadl(device, net_device);
-
- /* And dissassociate NAPI context from device */
- for (i = 0; i < net_device->num_chn; i++)
- netif_napi_del(&net_device->chan_table[i].napi);
+ if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4)
+ netvsc_teardown_gpadl(device, net_device);
/* Release all resources */
free_netvsc_device_rcu(net_device);
@@ -645,14 +641,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
queue_sends =
atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
- if (net_device->destroy && queue_sends == 0)
- wake_up(&net_device->wait_drain);
+ if (unlikely(net_device->destroy)) {
+ if (queue_sends == 0)
+ wake_up(&net_device->wait_drain);
+ } else {
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
- if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
- (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
- queue_sends < 1)) {
- netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
- ndev_ctx->eth_stats.wake_queue++;
+ if (netif_tx_queue_stopped(txq) &&
+ (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
+ queue_sends < 1)) {
+ netif_tx_wake_queue(txq);
+ ndev_ctx->eth_stats.wake_queue++;
+ }
}
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cdb78eefab6714..f28c85d212cee1 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -46,7 +46,10 @@
#include "hyperv_net.h"
-#define RING_SIZE_MIN 64
+#define RING_SIZE_MIN 64
+#define RETRY_US_LO 5000
+#define RETRY_US_HI 10000
+#define RETRY_MAX 2000 /* >10 sec */
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
@@ -89,15 +92,20 @@ static void netvsc_change_rx_flags(struct net_device *net, int change)
static void netvsc_set_rx_mode(struct net_device *net)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
- struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
- struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
+ struct net_device *vf_netdev;
+ struct netvsc_device *nvdev;
+ rcu_read_lock();
+ vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
if (vf_netdev) {
dev_uc_sync(vf_netdev, net);
dev_mc_sync(vf_netdev, net);
}
- rndis_filter_update(nvdev);
+ nvdev = rcu_dereference(ndev_ctx->nvdev);
+ if (nvdev)
+ rndis_filter_update(nvdev);
+ rcu_read_unlock();
}
static int netvsc_open(struct net_device *net)
@@ -118,10 +126,8 @@ static int netvsc_open(struct net_device *net)
}
rdev = nvdev->extension;
- if (!rdev->link_state) {
+ if (!rdev->link_state)
netif_carrier_on(net);
- netif_tx_wake_all_queues(net);
- }
if (vf_netdev) {
/* Setting synthetic device up transparently sets
@@ -137,36 +143,25 @@ static int netvsc_open(struct net_device *net)
return 0;
}
-static int netvsc_close(struct net_device *net)
+static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
- struct net_device_context *net_device_ctx = netdev_priv(net);
- struct net_device *vf_netdev
- = rtnl_dereference(net_device_ctx->vf_netdev);
- struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
- int ret = 0;
- u32 aread, i, msec = 10, retry = 0, retry_max = 20;
- struct vmbus_channel *chn;
-
- netif_tx_disable(net);
-
- /* No need to close rndis filter if it is removed already */
- if (!nvdev)
- goto out;
-
- ret = rndis_filter_close(nvdev);
- if (ret != 0) {
- netdev_err(net, "unable to close device (ret %d).\n", ret);
- return ret;
- }
+ unsigned int retry = 0;
+ int i;
/* Ensure pending bytes in ring are read */
- while (true) {
- aread = 0;
+ for (;;) {
+ u32 aread = 0;
+
for (i = 0; i < nvdev->num_chn; i++) {
- chn = nvdev->chan_table[i].channel;
+ struct vmbus_channel *chn
+ = nvdev->chan_table[i].channel;
+
if (!chn)
continue;
+ /* make sure receive is not running now */
+ napi_synchronize(&nvdev->chan_table[i].napi);
+
aread = hv_get_bytes_to_read(&chn->inbound);
if (aread)
break;
@@ -176,22 +171,40 @@ static int netvsc_close(struct net_device *net)
break;
}
- retry++;
- if (retry > retry_max || aread == 0)
- break;
+ if (aread == 0)
+ return 0;
- msleep(msec);
+ if (++retry > RETRY_MAX)
+ return -ETIMEDOUT;
- if (msec < 1000)
- msec *= 2;
+ usleep_range(RETRY_US_LO, RETRY_US_HI);
}
+}
- if (aread) {
- netdev_err(net, "Ring buffer not empty after closing rndis\n");
- ret = -ETIMEDOUT;
+static int netvsc_close(struct net_device *net)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct net_device *vf_netdev
+ = rtnl_dereference(net_device_ctx->vf_netdev);
+ struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+ int ret;
+
+ netif_tx_disable(net);
+
+ /* No need to close rndis filter if it is removed already */
+ if (!nvdev)
+ return 0;
+
+ ret = rndis_filter_close(nvdev);
+ if (ret != 0) {
+ netdev_err(net, "unable to close device (ret %d).\n", ret);
+ return ret;
}
-out:
+ ret = netvsc_wait_until_empty(nvdev);
+ if (ret)
+ netdev_err(net, "Ring buffer not empty after closing rndis\n");
+
if (vf_netdev)
dev_close(vf_netdev);
@@ -840,16 +853,81 @@ static void netvsc_get_channels(struct net_device *net,
}
}
+static int netvsc_detach(struct net_device *ndev,
+ struct netvsc_device *nvdev)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct hv_device *hdev = ndev_ctx->device_ctx;
+ int ret;
+
+ /* Don't keep trying to set up sub channels */
+ if (cancel_work_sync(&nvdev->subchan_work))
+ nvdev->num_chn = 1;
+
+ /* If device was up (receiving) then shutdown */
+ if (netif_running(ndev)) {
+ netif_tx_disable(ndev);
+
+ ret = rndis_filter_close(nvdev);
+ if (ret) {
+ netdev_err(ndev,
+ "unable to close device (ret %d).\n", ret);
+ return ret;
+ }
+
+ ret = netvsc_wait_until_empty(nvdev);
+ if (ret) {
+ netdev_err(ndev,
+ "Ring buffer not empty after closing rndis\n");
+ return ret;
+ }
+ }
+
+ netif_device_detach(ndev);
+
+ rndis_filter_device_remove(hdev, nvdev);
+
+ return 0;
+}
+
+static int netvsc_attach(struct net_device *ndev,
+ struct netvsc_device_info *dev_info)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct hv_device *hdev = ndev_ctx->device_ctx;
+ struct netvsc_device *nvdev;
+ struct rndis_device *rdev;
+ int ret;
+
+ nvdev = rndis_filter_device_add(hdev, dev_info);
+ if (IS_ERR(nvdev))
+ return PTR_ERR(nvdev);
+
+	/* Note: enable and attach happen when sub-channels are set up */
+
+ netif_carrier_off(ndev);
+
+ if (netif_running(ndev)) {
+ ret = rndis_filter_open(nvdev);
+ if (ret)
+ return ret;
+
+ rdev = nvdev->extension;
+ if (!rdev->link_state)
+ netif_carrier_on(ndev);
+ }
+
+ return 0;
+}
+
static int netvsc_set_channels(struct net_device *net,
struct ethtool_channels *channels)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *dev = net_device_ctx->device_ctx;
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
unsigned int orig, count = channels->combined_count;
struct netvsc_device_info device_info;
- bool was_opened;
- int ret = 0;
+ int ret;
/* We do not support separate count for rx, tx, or other */
if (count == 0 ||
@@ -866,9 +944,6 @@ static int netvsc_set_channels(struct net_device *net,
return -EINVAL;
orig = nvdev->num_chn;
- was_opened = rndis_filter_opened(nvdev);
- if (was_opened)
- rndis_filter_close(nvdev);
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = count;
@@ -877,28 +952,17 @@ static int netvsc_set_channels(struct net_device *net,
device_info.recv_sections = nvdev->recv_section_cnt;
device_info.recv_section_size = nvdev->recv_section_size;
- rndis_filter_device_remove(dev, nvdev);
+ ret = netvsc_detach(net, nvdev);
+ if (ret)
+ return ret;
- nvdev = rndis_filter_device_add(dev, &device_info);
- if (IS_ERR(nvdev)) {
- ret = PTR_ERR(nvdev);
+ ret = netvsc_attach(net, &device_info);
+ if (ret) {
device_info.num_chn = orig;
- nvdev = rndis_filter_device_add(dev, &device_info);
-
- if (IS_ERR(nvdev)) {
- netdev_err(net, "restoring channel setting failed: %ld\n",
- PTR_ERR(nvdev));
- return ret;
- }
+ if (netvsc_attach(net, &device_info))
+ netdev_err(net, "restoring channel setting failed\n");
}
- if (was_opened)
- rndis_filter_open(nvdev);
-
- /* We may have missed link change notifications */
- net_device_ctx->last_reconfig = 0;
- schedule_delayed_work(&net_device_ctx->dwork, 0);
-
return ret;
}
@@ -964,10 +1028,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct net_device_context *ndevctx = netdev_priv(ndev);
struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
- struct hv_device *hdev = ndevctx->device_ctx;
int orig_mtu = ndev->mtu;
struct netvsc_device_info device_info;
- bool was_opened;
int ret = 0;
if (!nvdev || nvdev->destroy)
@@ -980,11 +1042,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
return ret;
}
- netif_device_detach(ndev);
- was_opened = rndis_filter_opened(nvdev);
- if (was_opened)
- rndis_filter_close(nvdev);
-
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = nvdev->num_chn;
device_info.send_sections = nvdev->send_section_cnt;
@@ -992,35 +1049,27 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
device_info.recv_sections = nvdev->recv_section_cnt;
device_info.recv_section_size = nvdev->recv_section_size;
- rndis_filter_device_remove(hdev, nvdev);
+ ret = netvsc_detach(ndev, nvdev);
+ if (ret)
+ goto rollback_vf;
ndev->mtu = mtu;
- nvdev = rndis_filter_device_add(hdev, &device_info);
- if (IS_ERR(nvdev)) {
- ret = PTR_ERR(nvdev);
-
- /* Attempt rollback to original MTU */
- ndev->mtu = orig_mtu;
- nvdev = rndis_filter_device_add(hdev, &device_info);
-
- if (vf_netdev)
- dev_set_mtu(vf_netdev, orig_mtu);
-
- if (IS_ERR(nvdev)) {
- netdev_err(ndev, "restoring mtu failed: %ld\n",
- PTR_ERR(nvdev));
- return ret;
- }
- }
+ ret = netvsc_attach(ndev, &device_info);
+ if (ret)
+ goto rollback;
- if (was_opened)
- rndis_filter_open(nvdev);
+ return 0;
- netif_device_attach(ndev);
+rollback:
+ /* Attempt rollback to original MTU */
+ ndev->mtu = orig_mtu;
- /* We may have missed link change notifications */
- schedule_delayed_work(&ndevctx->dwork, 0);
+ if (netvsc_attach(ndev, &device_info))
+ netdev_err(ndev, "restoring mtu failed\n");
+rollback_vf:
+ if (vf_netdev)
+ dev_set_mtu(vf_netdev, orig_mtu);
return ret;
}
@@ -1526,11 +1575,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
- struct hv_device *hdev = ndevctx->device_ctx;
struct netvsc_device_info device_info;
struct ethtool_ringparam orig;
u32 new_tx, new_rx;
- bool was_opened;
int ret = 0;
if (!nvdev || nvdev->destroy)
@@ -1555,34 +1602,18 @@ static int netvsc_set_ringparam(struct net_device *ndev,
device_info.recv_sections = new_rx;
device_info.recv_section_size = nvdev->recv_section_size;
- netif_device_detach(ndev);
- was_opened = rndis_filter_opened(nvdev);
- if (was_opened)
- rndis_filter_close(nvdev);
-
- rndis_filter_device_remove(hdev, nvdev);
-
- nvdev = rndis_filter_device_add(hdev, &device_info);
- if (IS_ERR(nvdev)) {
- ret = PTR_ERR(nvdev);
+ ret = netvsc_detach(ndev, nvdev);
+ if (ret)
+ return ret;
+ ret = netvsc_attach(ndev, &device_info);
+ if (ret) {
device_info.send_sections = orig.tx_pending;
device_info.recv_sections = orig.rx_pending;
- nvdev = rndis_filter_device_add(hdev, &device_info);
- if (IS_ERR(nvdev)) {
- netdev_err(ndev, "restoring ringparam failed: %ld\n",
- PTR_ERR(nvdev));
- return ret;
- }
- }
-
- if (was_opened)
- rndis_filter_open(nvdev);
- netif_device_attach(ndev);
- /* We may have missed link change notifications */
- ndevctx->last_reconfig = 0;
- schedule_delayed_work(&ndevctx->dwork, 0);
+ if (netvsc_attach(ndev, &device_info))
+ netdev_err(ndev, "restoring ringparam failed");
+ }
return ret;
}
@@ -1846,8 +1877,12 @@ static void __netvsc_vf_setup(struct net_device *ndev,
/* set multicast etc flags on VF */
dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
+
+ /* sync address list from ndev to VF */
+ netif_addr_lock_bh(ndev);
dev_uc_sync(vf_netdev, ndev);
dev_mc_sync(vf_netdev, ndev);
+ netif_addr_unlock_bh(ndev);
if (netif_running(ndev)) {
ret = dev_open(vf_netdev);
@@ -2063,8 +2098,8 @@ no_net:
static int netvsc_remove(struct hv_device *dev)
{
struct net_device_context *ndev_ctx;
- struct net_device *vf_netdev;
- struct net_device *net;
+ struct net_device *vf_netdev, *net;
+ struct netvsc_device *nvdev;
net = hv_get_drvdata(dev);
if (net == NULL) {
@@ -2074,10 +2109,14 @@ static int netvsc_remove(struct hv_device *dev)
ndev_ctx = netdev_priv(net);
- netif_device_detach(net);
-
cancel_delayed_work_sync(&ndev_ctx->dwork);
+ rcu_read_lock();
+ nvdev = rcu_dereference(ndev_ctx->nvdev);
+
+ if (nvdev)
+ cancel_work_sync(&nvdev->subchan_work);
+
/*
* Call to the vsc driver to let it know that the device is being
* removed. Also blocks mtu and channel changes.
@@ -2087,11 +2126,13 @@ static int netvsc_remove(struct hv_device *dev)
if (vf_netdev)
netvsc_unregister_vf(vf_netdev);
+ if (nvdev)
+ rndis_filter_device_remove(dev, nvdev);
+
unregister_netdevice(net);
- rndis_filter_device_remove(dev,
- rtnl_dereference(ndev_ctx->nvdev));
rtnl_unlock();
+ rcu_read_unlock();
hv_set_drvdata(dev, NULL);
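
The hv_netvsc changes above fold the teardown and bring-up sequences into netvsc_detach()/netvsc_attach(), so the MTU, channel-count and ring-size paths no longer each reimplement close/drain/remove/re-add. The resulting callers all take roughly the shape of the following kernel-style sketch; demo_reconfigure() and its rollback message are illustrative only, not driver code:

    /* Sketch: reconfigure by detaching, re-attaching with new settings,
     * and falling back to the old settings if the new ones fail to attach.
     */
    static int demo_reconfigure(struct net_device *ndev,
                                struct netvsc_device *nvdev,
                                struct netvsc_device_info *new_info,
                                struct netvsc_device_info *old_info)
    {
            int ret;

            ret = netvsc_detach(ndev, nvdev);       /* close, drain ring, remove */
            if (ret)
                    return ret;

            ret = netvsc_attach(ndev, new_info);    /* re-add with new settings */
            if (ret && netvsc_attach(ndev, old_info))
                    netdev_err(ndev, "rollback to previous settings failed\n");

            return ret;
    }
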
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 8927c483c21738..a6ec41c399d6c9 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -264,13 +264,23 @@ static void rndis_set_link_state(struct rndis_device *rdev,
}
}
-static void rndis_filter_receive_response(struct rndis_device *dev,
- struct rndis_message *resp)
+static void rndis_filter_receive_response(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ const struct rndis_message *resp)
{
+ struct rndis_device *dev = nvdev->extension;
struct rndis_request *request = NULL;
bool found = false;
unsigned long flags;
- struct net_device *ndev = dev->ndev;
+
+	/* This should never happen; it means a control message
+	 * response was received after the device was removed.
+ */
+ if (dev->state == RNDIS_DEV_UNINITIALIZED) {
+ netdev_err(ndev,
+ "got rndis message uninitialized\n");
+ return;
+ }
spin_lock_irqsave(&dev->request_lock, flags);
list_for_each_entry(request, &dev->req_list, list_ent) {
@@ -352,7 +362,6 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
static int rndis_filter_receive_data(struct net_device *ndev,
struct netvsc_device *nvdev,
- struct rndis_device *dev,
struct rndis_message *msg,
struct vmbus_channel *channel,
void *data, u32 data_buflen)
@@ -372,7 +381,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
* should be the data packet size plus the trailer padding size
*/
if (unlikely(data_buflen < rndis_pkt->data_len)) {
- netdev_err(dev->ndev, "rndis message buffer "
+ netdev_err(ndev, "rndis message buffer "
"overflow detected (got %u, min %u)"
"...dropping this message!\n",
data_buflen, rndis_pkt->data_len);
@@ -400,35 +409,20 @@ int rndis_filter_receive(struct net_device *ndev,
void *data, u32 buflen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct rndis_device *rndis_dev = net_dev->extension;
struct rndis_message *rndis_msg = data;
- /* Make sure the rndis device state is initialized */
- if (unlikely(!rndis_dev)) {
- netif_dbg(net_device_ctx, rx_err, ndev,
- "got rndis message but no rndis device!\n");
- return NVSP_STAT_FAIL;
- }
-
- if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
- netif_dbg(net_device_ctx, rx_err, ndev,
- "got rndis message uninitialized\n");
- return NVSP_STAT_FAIL;
- }
-
if (netif_msg_rx_status(net_device_ctx))
dump_rndis_message(ndev, rndis_msg);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
- return rndis_filter_receive_data(ndev, net_dev,
- rndis_dev, rndis_msg,
+ return rndis_filter_receive_data(ndev, net_dev, rndis_msg,
channel, data, buflen);
case RNDIS_MSG_INIT_C:
case RNDIS_MSG_QUERY_C:
case RNDIS_MSG_SET_C:
/* completion msgs */
- rndis_filter_receive_response(rndis_dev, rndis_msg);
+ rndis_filter_receive_response(ndev, net_dev, rndis_msg);
break;
case RNDIS_MSG_INDICATE:
@@ -825,13 +819,15 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
struct rndis_set_request *set;
int ret;
+ if (dev->filter == new_filter)
+ return 0;
+
request = get_rndis_request(dev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
sizeof(u32));
if (!request)
return -ENOMEM;
-
/* Setup the rndis set */
set = &request->request_msg.msg.set_req;
set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
@@ -842,8 +838,10 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
&new_filter, sizeof(u32));
ret = rndis_filter_send_request(dev, request);
- if (ret == 0)
+ if (ret == 0) {
wait_for_completion(&request->wait_event);
+ dev->filter = new_filter;
+ }
put_rndis_request(dev, request);
@@ -861,9 +859,9 @@ static void rndis_set_multicast(struct work_struct *w)
filter = NDIS_PACKET_TYPE_PROMISCUOUS;
} else {
if (flags & IFF_ALLMULTI)
- flags |= NDIS_PACKET_TYPE_ALL_MULTICAST;
+ filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
if (flags & IFF_BROADCAST)
- flags |= NDIS_PACKET_TYPE_BROADCAST;
+ filter |= NDIS_PACKET_TYPE_BROADCAST;
}
rndis_filter_set_packet_filter(rdev, filter);
@@ -1120,6 +1118,7 @@ void rndis_set_subchannel(struct work_struct *w)
for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
ndev_ctx->tx_table[i] = i % nvdev->num_chn;
+ netif_device_attach(ndev);
rtnl_unlock();
return;
@@ -1130,6 +1129,8 @@ failed:
nvdev->max_chn = 1;
nvdev->num_chn = 1;
+
+ netif_device_attach(ndev);
unlock:
rtnl_unlock();
}
@@ -1332,6 +1333,10 @@ out:
net_device->num_chn = 1;
}
+ /* No sub channels, device is ready */
+ if (net_device->num_chn == 1)
+ netif_device_attach(net);
+
return net_device;
err_dev_remv:
@@ -1344,16 +1349,12 @@ void rndis_filter_device_remove(struct hv_device *dev,
{
struct rndis_device *rndis_dev = net_dev->extension;
- /* Don't try and setup sub channels if about to halt */
- cancel_work_sync(&net_dev->subchan_work);
-
/* Halt and release the rndis device */
rndis_filter_halt_device(rndis_dev);
net_dev->extension = NULL;
netvsc_device_remove(dev);
- kfree(rndis_dev);
}
int rndis_filter_open(struct netvsc_device *nvdev)
@@ -1371,10 +1372,3 @@ int rndis_filter_close(struct netvsc_device *nvdev)
return rndis_filter_close_device(nvdev->extension);
}
-
-bool rndis_filter_opened(const struct netvsc_device *nvdev)
-{
- const struct rndis_device *dev = nvdev->extension;
-
- return dev->state == RNDIS_DEV_DATAINITIALIZED;
-}
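
Two details in the rndis_filter.c hunks above are worth noting: rndis_filter_set_packet_filter() now returns early when the requested filter equals the last value sent, and only records the new value once the request has completed; the adjacent rndis_set_multicast() hunk also fixes the NDIS bits being OR'd into the wrong variable (flags instead of filter). A small user-space sketch of the idempotent-setter idea, with all names hypothetical:

    #include <stdio.h>
    #include <stdint.h>

    struct demo_nic { uint32_t cur_filter; };

    /* Stand-in for the firmware round trip; always succeeds here. */
    static int demo_send_filter_cmd(struct demo_nic *nic, uint32_t filter)
    {
            (void)nic;
            printf("programming filter 0x%x\n", filter);
            return 0;
    }

    /* Skip the hardware round trip when nothing changed, and only cache the
     * value once the request has succeeded. */
    static int demo_set_filter(struct demo_nic *nic, uint32_t new_filter)
    {
            int ret;

            if (nic->cur_filter == new_filter)
                    return 0;

            ret = demo_send_filter_cmd(nic, new_filter);
            if (ret == 0)
                    nic->cur_filter = new_filter;
            return ret;
    }

    int main(void)
    {
            struct demo_nic nic = { 0 };

            demo_set_filter(&nic, 0x9);     /* programs the hardware */
            demo_set_filter(&nic, 0x9);     /* no-op */
            return 0;
    }
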
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 7de88b33d5b96d..9cbb0c8a896aff 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
- goto unregister;
+ goto put_dev;
/* need to be already registered so that ->init has run and
* the MAC addr is set
@@ -3316,7 +3316,8 @@ del_dev:
macsec_del_dev(macsec);
unlink:
netdev_upper_dev_unlink(real_dev, dev);
-unregister:
+put_dev:
+ dev_put(real_dev);
unregister_netdevice(dev);
return err;
}
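
The macsec fix renames the error label to put_dev and adds the missing dev_put(real_dev), so the reference taken earlier in macsec_newlink() is released on every failure after that point. The underlying convention is the usual reverse-order goto unwind; here is a compilable toy version with hypothetical acquire/release functions, not the macsec code:

    #include <stdio.h>

    static int acquire_a(void) { puts("hold A"); return 0; }
    static int acquire_b(void) { puts("hold B"); return 0; }
    static int acquire_c(void) { puts("link C"); return -1; }  /* force a failure */
    static void release_b(void) { puts("put B"); }
    static void release_a(void) { puts("put A"); }

    static int demo_setup(void)
    {
            int err;

            err = acquire_a();
            if (err)
                    return err;

            err = acquire_b();
            if (err)
                    goto put_a;

            err = acquire_c();
            if (err)
                    goto put_b;     /* every later failure must drop B too */

            return 0;

    put_b:
            release_b();
    put_a:
            release_a();            /* unwind strictly in reverse order */
            return err;
    }

    int main(void)
    {
            return demo_setup() ? 1 : 0;
    }
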
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8fc02d9db3d011..725f4b4afc6da9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1036,7 +1036,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
lowerdev_features &= (features | ~NETIF_F_LRO);
features = netdev_increment_features(lowerdev_features, features, mask);
features |= ALWAYS_ON_FEATURES;
- features &= ~NETIF_F_NETNS_LOCAL;
+ features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES);
return features;
}
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 171010eb4d9c5c..5ad130c3da43c8 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -341,8 +341,8 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
}
EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
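
This change, repeated below for the Marvell and Micrel PHY drivers, replaces memcpy() with strlcpy() when filling the ETH_GSTRING_LEN-sized ethtool string table: copying a full ETH_GSTRING_LEN from a shorter string literal can read past the end of that literal, while strlcpy() stops at the NUL and still bounds the destination. A small runnable user-space illustration of the safe pattern, with snprintf() standing in for the kernel's strlcpy():

    #include <stdio.h>
    #include <string.h>

    #define ETH_GSTRING_LEN 32

    static const char * const stat_names[] = { "rx_packets", "tx_packets" };

    int main(void)
    {
            char table[2][ETH_GSTRING_LEN];
            size_t i;

            for (i = 0; i < sizeof(stat_names) / sizeof(stat_names[0]); i++) {
                    /* memcpy(table[i], stat_names[i], ETH_GSTRING_LEN) would read
                     * beyond the short literal; copy at most the string itself. */
                    snprintf(table[i], ETH_GSTRING_LEN, "%s", stat_names[i]);
                    printf("%zu: %s\n", i, table[i]);
            }
            return 0;
    }
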
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 22d9bc9c33a4bc..0e0978d8a0eb33 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1452,8 +1452,8 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- marvell_hw_stats[i].string, ETH_GSTRING_LEN);
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ marvell_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0f45310300f667..f41b224a9cdbf4 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -635,25 +635,6 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev)
return 0;
}
-/* This routine returns -1 as an indication to the caller that the
- * Micrel ksz9021 10/100/1000 PHY does not support standard IEEE
- * MMD extended PHY registers.
- */
-static int
-ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum)
-{
- return -1;
-}
-
-/* This routine does nothing since the Micrel ksz9021 does not support
- * standard IEEE MMD extended PHY registers.
- */
-static int
-ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum, u16 val)
-{
- return -1;
-}
-
static int kszphy_get_sset_count(struct phy_device *phydev)
{
return ARRAY_SIZE(kszphy_hw_stats);
@@ -664,8 +645,8 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
@@ -946,8 +927,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
- .read_mmd = ksz9021_rd_mmd_phyreg,
- .write_mmd = ksz9021_wr_mmd_phyreg,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
}, {
.phy_id = PHY_ID_KSZ9031,
.phy_id_mask = MICREL_PHY_ID_MASK,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a6f924fee5840d..9aabfa1a455a89 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -618,6 +618,77 @@ static void phy_error(struct phy_device *phydev)
}
/**
+ * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
+ * @phydev: target phy_device struct
+ */
+static int phy_disable_interrupts(struct phy_device *phydev)
+{
+ int err;
+
+ /* Disable PHY interrupts */
+ err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
+ if (err)
+ goto phy_err;
+
+ /* Clear the interrupt */
+ err = phy_clear_interrupt(phydev);
+ if (err)
+ goto phy_err;
+
+ return 0;
+
+phy_err:
+ phy_error(phydev);
+
+ return err;
+}
+
+/**
+ * phy_change - Called by the phy_interrupt to handle PHY changes
+ * @phydev: phy_device struct that interrupted
+ */
+static irqreturn_t phy_change(struct phy_device *phydev)
+{
+ if (phy_interrupt_is_valid(phydev)) {
+ if (phydev->drv->did_interrupt &&
+ !phydev->drv->did_interrupt(phydev))
+ return IRQ_NONE;
+
+ if (phydev->state == PHY_HALTED)
+ if (phy_disable_interrupts(phydev))
+ goto phy_err;
+ }
+
+ mutex_lock(&phydev->lock);
+ if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
+ phydev->state = PHY_CHANGELINK;
+ mutex_unlock(&phydev->lock);
+
+ /* reschedule state queue work to run as soon as possible */
+ phy_trigger_machine(phydev, true);
+
+ if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
+ goto phy_err;
+ return IRQ_HANDLED;
+
+phy_err:
+ phy_error(phydev);
+ return IRQ_NONE;
+}
+
+/**
+ * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
+ * @work: work_struct that describes the work to be done
+ */
+void phy_change_work(struct work_struct *work)
+{
+ struct phy_device *phydev =
+ container_of(work, struct phy_device, phy_queue);
+
+ phy_change(phydev);
+}
+
+/**
* phy_interrupt - PHY interrupt handler
* @irq: interrupt line
* @phy_dat: phy_device pointer
@@ -632,9 +703,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
if (PHY_HALTED == phydev->state)
return IRQ_NONE; /* It can't be ours. */
- phy_change(phydev);
-
- return IRQ_HANDLED;
+ return phy_change(phydev);
}
/**
@@ -652,32 +721,6 @@ static int phy_enable_interrupts(struct phy_device *phydev)
}
/**
- * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
- * @phydev: target phy_device struct
- */
-static int phy_disable_interrupts(struct phy_device *phydev)
-{
- int err;
-
- /* Disable PHY interrupts */
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
- if (err)
- goto phy_err;
-
- /* Clear the interrupt */
- err = phy_clear_interrupt(phydev);
- if (err)
- goto phy_err;
-
- return 0;
-
-phy_err:
- phy_error(phydev);
-
- return err;
-}
-
-/**
* phy_start_interrupts - request and enable interrupts for a PHY device
* @phydev: target phy_device struct
*
@@ -720,50 +763,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
EXPORT_SYMBOL(phy_stop_interrupts);
/**
- * phy_change - Called by the phy_interrupt to handle PHY changes
- * @phydev: phy_device struct that interrupted
- */
-void phy_change(struct phy_device *phydev)
-{
- if (phy_interrupt_is_valid(phydev)) {
- if (phydev->drv->did_interrupt &&
- !phydev->drv->did_interrupt(phydev))
- return;
-
- if (phydev->state == PHY_HALTED)
- if (phy_disable_interrupts(phydev))
- goto phy_err;
- }
-
- mutex_lock(&phydev->lock);
- if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
- phydev->state = PHY_CHANGELINK;
- mutex_unlock(&phydev->lock);
-
- /* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev, true);
-
- if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
- goto phy_err;
- return;
-
-phy_err:
- phy_error(phydev);
-}
-
-/**
- * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
- * @work: work_struct that describes the work to be done
- */
-void phy_change_work(struct work_struct *work)
-{
- struct phy_device *phydev =
- container_of(work, struct phy_device, phy_queue);
-
- phy_change(phydev);
-}
-
-/**
* phy_stop - Bring down the PHY link, and stop checking the status
* @phydev: target phy_device struct
*/
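
In the phy.c rework above, phy_change() becomes static and returns an irqreturn_t, and phy_interrupt() simply propagates that result. Returning IRQ_NONE when the PHY did not actually assert the line lets the IRQ core detect spurious interrupts, which matters on shared interrupt lines. A minimal kernel-style sketch of that handler convention; the demo_* device, register layout and ack sequence are hypothetical:

    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct demo_dev { void __iomem *regs; };

    static bool demo_irq_pending(struct demo_dev *dev)
    {
            /* hypothetical: bit 0 of the first register is the IRQ status */
            return readl(dev->regs) & 0x1;
    }

    static irqreturn_t demo_isr(int irq, void *dev_id)
    {
            struct demo_dev *dev = dev_id;

            if (!demo_irq_pending(dev))
                    return IRQ_NONE;        /* not ours: vital on shared lines */

            writel(0x1, dev->regs);         /* hypothetical: ack the interrupt */
            return IRQ_HANDLED;
    }
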
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 478405e544cc87..74664a6c0cdc31 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1012,10 +1012,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
"attached_dev");
if (!err) {
- err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj,
- "phydev");
- if (err)
- goto error;
+ err = sysfs_create_link_nowarn(&dev->dev.kobj,
+ &phydev->mdio.dev.kobj,
+ "phydev");
+ if (err) {
+ dev_err(&dev->dev, "could not add device link to %s err %d\n",
+ kobject_name(&phydev->mdio.dev.kobj),
+ err);
+ /* non-fatal - some net drivers can use one netdevice
+			 * with more than one phy
+ */
+ }
phydev->sysfs_links = true;
}
@@ -1666,6 +1673,23 @@ int genphy_config_init(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_config_init);
+/* These are used for PHY devices which don't support the MMD extended
+ * register space, but where the indirect MMD access method has side
+ * effects on the device.
+ */
+int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum)
+{
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(genphy_read_mmd_unsupported);
+
+int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
+ u16 regnum, u16 val)
+{
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(genphy_write_mmd_unsupported);
+
int genphy_suspend(struct phy_device *phydev)
{
return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index ee3ca4a2f12b41..9f48ecf9c62700 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -172,6 +172,8 @@ static struct phy_driver realtek_drvs[] = {
.flags = PHY_HAS_INTERRUPT,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
+ .read_mmd = &genphy_read_mmd_unsupported,
+ .write_mmd = &genphy_write_mmd_unsupported,
}, {
.phy_id = 0x001cc914,
.name = "RTL8211DN Gigabit Ethernet",
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fa2a9bdd186606..da1937832c9902 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -257,7 +257,7 @@ struct ppp_net {
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
-static void ppp_xmit_process(struct ppp *ppp);
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
@@ -513,13 +513,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
goto out;
}
- skb_queue_tail(&pf->xq, skb);
-
switch (pf->kind) {
case INTERFACE:
- ppp_xmit_process(PF_TO_PPP(pf));
+ ppp_xmit_process(PF_TO_PPP(pf), skb);
break;
case CHANNEL:
+ skb_queue_tail(&pf->xq, skb);
ppp_channel_push(PF_TO_CHANNEL(pf));
break;
}
@@ -1267,8 +1266,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
put_unaligned_be16(proto, pp);
skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
- skb_queue_tail(&ppp->file.xq, skb);
- ppp_xmit_process(ppp);
+ ppp_xmit_process(ppp, skb);
+
return NETDEV_TX_OK;
outf:
@@ -1420,13 +1419,14 @@ static void ppp_setup(struct net_device *dev)
*/
/* Called to do any work queued up on the transmit side that can now be done */
-static void __ppp_xmit_process(struct ppp *ppp)
+static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
- struct sk_buff *skb;
-
ppp_xmit_lock(ppp);
if (!ppp->closing) {
ppp_push(ppp);
+
+ if (skb)
+ skb_queue_tail(&ppp->file.xq, skb);
while (!ppp->xmit_pending &&
(skb = skb_dequeue(&ppp->file.xq)))
ppp_send_frame(ppp, skb);
@@ -1440,7 +1440,7 @@ static void __ppp_xmit_process(struct ppp *ppp)
ppp_xmit_unlock(ppp);
}
-static void ppp_xmit_process(struct ppp *ppp)
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
local_bh_disable();
@@ -1448,7 +1448,7 @@ static void ppp_xmit_process(struct ppp *ppp)
goto err;
(*this_cpu_ptr(ppp->xmit_recursion))++;
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, skb);
(*this_cpu_ptr(ppp->xmit_recursion))--;
local_bh_enable();
@@ -1458,6 +1458,8 @@ static void ppp_xmit_process(struct ppp *ppp)
err:
local_bh_enable();
+ kfree_skb(skb);
+
if (net_ratelimit())
netdev_err(ppp->dev, "recursion detected\n");
}
@@ -1942,7 +1944,7 @@ static void __ppp_channel_push(struct channel *pch)
if (skb_queue_empty(&pch->file.xq)) {
ppp = pch->ppp;
if (ppp)
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, NULL);
}
}
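
The ppp_generic.c rework passes the skb into ppp_xmit_process() instead of queueing it before the recursion check, so that when the per-CPU recursion guard trips (the transmit path re-entering itself on the same CPU) the frame is freed on the error path rather than left on the queue for the outer call to replay indefinitely. Roughly, as a kernel-style sketch where demo_unit is a hypothetical structure carrying an int __percpu *xmit_recursion and __demo_xmit_process() is the hypothetical queue-and-drain step:

    /* Sketch of a per-CPU transmit recursion guard that drops, rather than
     * re-queues, the frame when the path re-enters itself on this CPU. */
    static void demo_xmit_process(struct demo_unit *unit, struct sk_buff *skb)
    {
            local_bh_disable();

            if (*this_cpu_ptr(unit->xmit_recursion)) {
                    local_bh_enable();
                    kfree_skb(skb);         /* queueing here is what caused the loop */
                    if (net_ratelimit())
                            pr_err("demo: xmit recursion detected\n");
                    return;
            }

            (*this_cpu_ptr(unit->xmit_recursion))++;
            __demo_xmit_process(unit, skb); /* hypothetical: queue and drain */
            (*this_cpu_ptr(unit->xmit_recursion))--;

            local_bh_enable();
    }
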
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a468439969df7f..56c701b73c127c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2395,7 +2395,7 @@ send_done:
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}
@@ -2681,7 +2681,7 @@ send_done:
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7433bb2e445163..28cfa642e39a25 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -655,7 +655,7 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
return tun;
}
-static void tun_ptr_free(void *ptr)
+void tun_ptr_free(void *ptr)
{
if (!ptr)
return;
@@ -667,6 +667,7 @@ static void tun_ptr_free(void *ptr)
__skb_array_destroy_skb(ptr);
}
}
+EXPORT_SYMBOL_GPL(tun_ptr_free);
static void tun_queue_purge(struct tun_file *tfile)
{
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8a22ff67b0268a..d9eea8cfe6cb9a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -315,6 +315,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev)
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+ unsigned long flags;
int status;
if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
@@ -326,10 +327,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
if (skb->protocol == 0)
skb->protocol = eth_type_trans (skb, dev->net);
- u64_stats_update_begin(&stats64->syncp);
+ flags = u64_stats_update_begin_irqsave(&stats64->syncp);
stats64->rx_packets++;
stats64->rx_bytes += skb->len;
- u64_stats_update_end(&stats64->syncp);
+ u64_stats_update_end_irqrestore(&stats64->syncp, flags);
netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
skb->len + sizeof (struct ethhdr), skb->protocol);
@@ -1248,11 +1249,12 @@ static void tx_complete (struct urb *urb)
if (urb->status == 0) {
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+ unsigned long flags;
- u64_stats_update_begin(&stats64->syncp);
+ flags = u64_stats_update_begin_irqsave(&stats64->syncp);
stats64->tx_packets += entry->packets;
stats64->tx_bytes += entry->length;
- u64_stats_update_end(&stats64->syncp);
+ u64_stats_update_end_irqrestore(&stats64->syncp, flags);
} else {
dev->net->stats.tx_errors++;
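
The usbnet hunks above switch the per-CPU stats critical sections to the irqsave variants: the idea is that the URB completion path can run in hard-interrupt context while other stats writers run in softirq context, and the *_irqsave/*_irqrestore forms keep a nested writer on the same CPU from corrupting or deadlocking the 32-bit seqcount. A minimal kernel-style sketch of the pattern, with demo_* names standing in for the driver's structures:

    #include <linux/u64_stats_sync.h>

    struct demo_stats {
            u64 packets;
            u64 bytes;
            struct u64_stats_sync syncp;
    };

    /* Writer that may be entered from hard-IRQ or softirq context. */
    static void demo_stats_add(struct demo_stats *s, unsigned int len)
    {
            unsigned long flags;

            flags = u64_stats_update_begin_irqsave(&s->syncp);
            s->packets++;
            s->bytes += len;
            u64_stats_update_end_irqrestore(&s->syncp, flags);
    }
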
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 8b39c160743d41..e04937f44f3331 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -977,6 +977,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
{
int ret;
u32 count;
+ int num_pkts;
+ int tx_num_deferred;
unsigned long flags;
struct vmxnet3_tx_ctx ctx;
union Vmxnet3_GenericDesc *gdesc;
@@ -1075,12 +1077,12 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
#else
gdesc = ctx.sop_txd;
#endif
+ tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
if (ctx.mss) {
gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
gdesc->txd.om = VMXNET3_OM_TSO;
gdesc->txd.msscof = ctx.mss;
- le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
- gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
+ num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
gdesc->txd.hlen = ctx.eth_ip_hdr_size;
@@ -1091,8 +1093,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
gdesc->txd.om = 0;
gdesc->txd.msscof = 0;
}
- le32_add_cpu(&tq->shared->txNumDeferred, 1);
+ num_pkts = 1;
}
+ le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
+ tx_num_deferred += num_pkts;
if (skb_vlan_tag_present(skb)) {
gdesc->txd.ti = 1;
@@ -1118,8 +1122,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
spin_unlock_irqrestore(&tq->tx_lock, flags);
- if (le32_to_cpu(tq->shared->txNumDeferred) >=
- le32_to_cpu(tq->shared->txThreshold)) {
+ if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
tq->shared->txNumDeferred = 0;
VMXNET3_WRITE_BAR0_REG(adapter,
VMXNET3_REG_TXPROD + tq->qid * 8,
@@ -1470,7 +1473,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
vmxnet3_rx_csum(adapter, skb,
(union Vmxnet3_GenericDesc *)rcd);
skb->protocol = eth_type_trans(skb, adapter->netdev);
- if (!rcd->tcp || !adapter->lro)
+ if (!rcd->tcp ||
+ !(adapter->netdev->features & NETIF_F_LRO))
goto not_lro;
if (segCnt != 0 && mss != 0) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 5ba222920e8009..59ec34052a651e 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.11.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040b00
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -342,9 +342,6 @@ struct vmxnet3_adapter {
u8 __iomem *hw_addr1; /* for BAR 1 */
u8 version;
- bool rxcsum;
- bool lro;
-
#ifdef VMXNET3_RSS
struct UPT1_RSSConf *rss_conf;
bool rss;
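
The vmxnet3 change drops the driver's private rxcsum/lro booleans and has the receive path consult the live netdev feature bit instead, so toggling LRO with ethtool takes effect immediately rather than reading a stale cached copy. The check itself reduces to the following sketch:

    #include <linux/netdevice.h>

    /* Consult the live feature flags rather than a cached private bool, so a
     * runtime "ethtool -K ethX lro off" is honoured on the next packet. */
    static bool demo_lro_enabled(const struct net_device *netdev)
    {
            return !!(netdev->features & NETIF_F_LRO);
    }
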
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index e89e5ef2c2a4ed..f246e9ed4a814d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -729,6 +729,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
ieee80211_hw_set(hw, SPECTRUM_MGMT);
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, DOESNT_SUPPORT_QOS_NDP);
if (ath9k_ps_enable)
ieee80211_hw_set(hw, SUPPORTS_PS);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index df8a1ecb992415..232dcbb8331111 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -181,6 +181,7 @@ enum brcmf_netif_stop_reason {
* @netif_stop_lock: spinlock for update netif_stop from multiple sources.
* @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
* @pend_8021x_wait: used for signalling change in count.
+ * @fwil_fwerr: flag indicating fwil layer should return firmware error codes.
*/
struct brcmf_if {
struct brcmf_pub *drvr;
@@ -198,6 +199,7 @@ struct brcmf_if {
wait_queue_head_t pend_8021x_wait;
struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES];
u8 ipv6addr_idx;
+ bool fwil_fwerr;
};
int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 47de35a338532f..bede7b7fd9962c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -104,6 +104,9 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
u32 data;
int err;
+ /* we need to know firmware error */
+ ifp->fwil_fwerr = true;
+
err = brcmf_fil_iovar_int_get(ifp, name, &data);
if (err == 0) {
brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
@@ -112,6 +115,8 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
brcmf_dbg(TRACE, "%s feature check failed: %d\n",
brcmf_feat_names[id], err);
}
+
+ ifp->fwil_fwerr = false;
}
static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
@@ -120,6 +125,9 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
{
int err;
+ /* we need to know firmware error */
+ ifp->fwil_fwerr = true;
+
err = brcmf_fil_iovar_data_set(ifp, name, data, len);
if (err != -BRCMF_FW_UNSUPPORTED) {
brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
@@ -128,6 +136,8 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
brcmf_dbg(TRACE, "%s feature check failed: %d\n",
brcmf_feat_names[id], err);
}
+
+ ifp->fwil_fwerr = false;
}
#define MAX_CAPS_BUFFER_SIZE 512
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index f2cfdd3b2bf1a2..fc5751116d99bf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -131,6 +131,9 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
brcmf_fil_get_errstr((u32)(-fwerr)), fwerr);
err = -EBADE;
}
+ if (ifp->fwil_fwerr)
+ return fwerr;
+
return err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 2ee54133efa1c0..82064e90978497 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
* @dev_addr: optional device address.
*
* P2P needs mac addresses for P2P device and interface. If no device
- * address it specified, these are derived from the primary net device, ie.
- * the permanent ethernet address of the device.
+ * address is specified, these are derived from a random ethernet
+ * address.
*/
static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
{
- struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
- bool local_admin = false;
+ bool random_addr = false;
- if (!dev_addr || is_zero_ether_addr(dev_addr)) {
- dev_addr = pri_ifp->mac_addr;
- local_admin = true;
- }
+ if (!dev_addr || is_zero_ether_addr(dev_addr))
+ random_addr = true;
- /* Generate the P2P Device Address. This consists of the device's
- * primary MAC address with the locally administered bit set.
+	/* Generate the P2P Device Address by obtaining a random ethernet
+ * address with the locally administered bit set.
*/
- memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
- if (local_admin)
- p2p->dev_addr[0] |= 0x02;
+ if (random_addr)
+ eth_random_addr(p2p->dev_addr);
+ else
+ memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
/* Generate the P2P Interface Address. If the discovery and connection
* BSSCFGs need to simultaneously co-exist, then this address must be
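
brcmf_p2p_generate_bss_mac() now derives the P2P device address from eth_random_addr(), which produces a random address with the multicast bit cleared and the locally-administered bit set, instead of reusing the primary interface MAC. A small runnable user-space equivalent for illustration only (rand() here is not a suitable entropy source for real address generation):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Fill addr with a random MAC: unicast (bit 0 of byte 0 clear) and
     * locally administered (bit 1 of byte 0 set), like eth_random_addr(). */
    static void random_local_mac(unsigned char addr[6])
    {
            int i;

            for (i = 0; i < 6; i++)
                    addr[i] = rand() & 0xff;
            addr[0] &= 0xfe;        /* clear multicast bit */
            addr[0] |= 0x02;        /* set locally administered bit */
    }

    int main(void)
    {
            unsigned char mac[6];

            srand((unsigned)time(NULL));
            random_local_mac(mac);
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
            return 0;
    }
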
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index c5f2ddf9b0fe5f..e5a2fc738ac361 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -91,7 +91,6 @@ config IWLWIFI_BCAST_FILTERING
config IWLWIFI_PCIE_RTPM
bool "Enable runtime power management mode for PCIe devices"
depends on IWLMVM && PM && EXPERT
- default false
help
Say Y here to enable runtime power management for PCIe
devices. If enabled, the device will go into low power mode
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index 3721a3ed358b83..f824bebceb0608 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -211,7 +211,7 @@ enum {
* @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end
* @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
* @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
- * @T2_V2_START_IMMEDIATELY: start time event immediately
+ * @TE_V2_START_IMMEDIATELY: start time event immediately
* @TE_V2_DEP_OTHER: depends on another time event
* @TE_V2_DEP_TSF: depends on a specific time
* @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
@@ -230,7 +230,7 @@ enum iwl_time_event_policy {
TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
- T2_V2_START_IMMEDIATELY = BIT(11),
+ TE_V2_START_IMMEDIATELY = BIT(11),
/* placement characteristics */
TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 67aefc8fc9acc5..7bd704a3e6409b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -8,6 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -942,7 +944,6 @@ dump_trans_data:
out:
iwl_fw_free_dump_desc(fwrt);
- fwrt->dump.trig = NULL;
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
}
@@ -1112,6 +1113,14 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
fwrt->ops->dump_start(fwrt->ops_ctx))
return;
+ if (fwrt->ops && fwrt->ops->fw_running &&
+ !fwrt->ops->fw_running(fwrt->ops_ctx)) {
+ IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
+ iwl_fw_free_dump_desc(fwrt);
+ clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
+ goto out;
+ }
+
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
/* stop recording */
iwl_fw_dbg_stop_recording(fwrt);
@@ -1145,7 +1154,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
}
}
-
+out:
if (fwrt->ops && fwrt->ops->dump_end)
fwrt->ops->dump_end(fwrt->ops_ctx);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 223fb77a3aa9d6..72259bff9922f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -8,6 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -91,6 +93,7 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
if (fwrt->dump.desc != &iwl_dump_desc_assert)
kfree(fwrt->dump.desc);
fwrt->dump.desc = NULL;
+ fwrt->dump.trig = NULL;
}
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
index e57ff92a68ae20..3da468d2cc92f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -75,6 +75,20 @@ static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
cancel_delayed_work_sync(&fwrt->timestamp.wk);
}
+static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt)
+{
+ cancel_delayed_work_sync(&fwrt->timestamp.wk);
+}
+
+static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt)
+{
+ if (!fwrt->timestamp.delay)
+ return;
+
+ schedule_delayed_work(&fwrt->timestamp.wk,
+ round_jiffies_relative(fwrt->timestamp.delay));
+}
+
#else
static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
@@ -84,4 +98,8 @@ static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
+static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
+
#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index c39fe84bb4c4a9..2efac307909e1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -77,8 +77,14 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
-void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt)
+void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
{
- iwl_fw_cancel_timestamp(fwrt);
+ iwl_fw_suspend_timestamp(fwrt);
}
-IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit);
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);
+
+void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
+{
+ iwl_fw_resume_timestamp(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index e25c049f980f27..3fb940ebd74aae 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,6 +70,7 @@
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
+ bool (*fw_running)(void *ctx);
};
#define MAX_NUM_LMAC 2
@@ -150,6 +153,10 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
+void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);
+
+void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt);
+
static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,
enum iwl_ucode_type cur_fw_img)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 0e6cf39285f405..2efe9b099556d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1098,6 +1098,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
/* make sure the d0i3 exit work is not pending */
flush_work(&mvm->d0i3_exit_work);
+ iwl_fw_runtime_suspend(&mvm->fwrt);
+
ret = iwl_trans_suspend(trans);
if (ret)
return ret;
@@ -2012,6 +2014,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ iwl_fw_runtime_resume(&mvm->fwrt);
+
return ret;
}
@@ -2038,6 +2042,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+ iwl_fw_runtime_suspend(&mvm->fwrt);
+
/* start pseudo D3 */
rtnl_lock();
err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
@@ -2098,6 +2104,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
__iwl_mvm_resume(mvm, true);
rtnl_unlock();
+ iwl_fw_runtime_resume(&mvm->fwrt);
+
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_abort_notification_waits(&mvm->notif_wait);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a7892c1254a293..9c436d8d001d39 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1281,9 +1283,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
{
int ret;
- if (!iwl_mvm_firmware_running(mvm))
- return -EIO;
-
ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 2f22e14e00fe88..8ba16fc24e3af0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -438,7 +438,8 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
/* Allocate the CAB queue for softAP and GO interfaces */
- if (vif->type == NL80211_IFTYPE_AP) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
/*
* For TVQM this will be overwritten later with the FW assigned
* queue value (when queue is enabled).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 8aed40a8bc385f..ebf511150f4d02 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -2106,15 +2107,40 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_remove;
- ret = iwl_mvm_add_mcast_sta(mvm, vif);
- if (ret)
- goto out_unbind;
-
- /* Send the bcast station. At this stage the TBTT and DTIM time events
- * are added and applied to the scheduler */
- ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
- if (ret)
- goto out_rm_mcast;
+ /*
+ * This is not very nice, but the simplest:
+ * For older FWs adding the mcast sta before the bcast station may
+ * cause assert 0x2b00.
+ * This is fixed in later FW so make the order of removal depend on
+ * the TLV
+ */
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+ ret = iwl_mvm_add_mcast_sta(mvm, vif);
+ if (ret)
+ goto out_unbind;
+ /*
+ * Send the bcast station. At this stage the TBTT and DTIM time
+ * events are added and applied to the scheduler
+ */
+ ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+ if (ret) {
+ iwl_mvm_rm_mcast_sta(mvm, vif);
+ goto out_unbind;
+ }
+ } else {
+ /*
+ * Send the bcast station. At this stage the TBTT and DTIM time
+ * events are added and applied to the scheduler
+ */
+		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+ if (ret)
+ goto out_unbind;
+		ret = iwl_mvm_add_mcast_sta(mvm, vif);
+ if (ret) {
+ iwl_mvm_send_rm_bcast_sta(mvm, vif);
+ goto out_unbind;
+ }
+ }
/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;
@@ -2144,7 +2170,6 @@ out_quota_failed:
iwl_mvm_power_update_mac(mvm);
mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, vif);
-out_rm_mcast:
iwl_mvm_rm_mcast_sta(mvm, vif);
out_unbind:
iwl_mvm_binding_remove_vif(mvm, vif);
@@ -2682,6 +2707,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
/* enable beacon filtering */
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+
+ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+ false);
+
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 2d28e08042186e..89ff02d7c87663 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -90,6 +90,7 @@
#include "fw/runtime.h"
#include "fw/dbg.h"
#include "fw/acpi.h"
+#include "fw/debugfs.h"
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
@@ -1783,6 +1784,7 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
+ iwl_fw_cancel_timestamp(&mvm->fwrt);
iwl_free_fw_paging(&mvm->fwrt);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
iwl_fw_dump_conf_clear(&mvm->fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5d525a0023dc3f..ab7fb5aad984a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -552,9 +554,15 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}
+static bool iwl_mvm_fwrt_fw_running(void *ctx)
+{
+ return iwl_mvm_firmware_running(ctx);
+}
+
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.dump_start = iwl_mvm_fwrt_dump_start,
.dump_end = iwl_mvm_fwrt_dump_end,
+ .fw_running = iwl_mvm_fwrt_fw_running,
};
static struct iwl_op_mode *
@@ -802,7 +810,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
out_free:
- iwl_fw_runtime_exit(&mvm->fwrt);
iwl_fw_flush_dump(&mvm->fwrt);
if (iwlmvm_mod_params.init_dbg)
@@ -843,7 +850,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
kfree(mvm->d3_resume_sram);
#endif
- iwl_fw_runtime_exit(&mvm->fwrt);
iwl_trans_op_mode_leave(mvm->trans);
iwl_phy_db_free(mvm->phy_db);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 60abb0084ee590..47f4c7a1d80d26 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2684,7 +2684,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
enum nl80211_band band,
- struct rs_rate *rate)
+ struct rs_rate *rate,
+ bool init)
{
int i, nentries;
unsigned long active_rate;
@@ -2738,14 +2739,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
*/
if (sta->vht_cap.vht_supported &&
best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
- switch (sta->bandwidth) {
- case IEEE80211_STA_RX_BW_160:
- case IEEE80211_STA_RX_BW_80:
- case IEEE80211_STA_RX_BW_40:
+ /*
+ * In AP mode, when a new station associates, rs is initialized
+ * immediately upon association completion, before the phy
+ * context is updated with the association parameters, so the
+ * sta bandwidth might be wider than the phy context allows.
+ * To avoid this issue, always initialize rs with 20mhz
+ * bandwidth rate, and after authorization, when the phy context
+ * is already up-to-date, re-init rs with the correct bw.
+ */
+ u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta);
+
+ switch (bw) {
+ case RATE_MCS_CHAN_WIDTH_40:
+ case RATE_MCS_CHAN_WIDTH_80:
+ case RATE_MCS_CHAN_WIDTH_160:
initial_rates = rs_optimal_rates_vht;
nentries = ARRAY_SIZE(rs_optimal_rates_vht);
break;
- case IEEE80211_STA_RX_BW_20:
+ case RATE_MCS_CHAN_WIDTH_20:
initial_rates = rs_optimal_rates_vht_20mhz;
nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
break;
@@ -2756,7 +2768,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
active_rate = lq_sta->active_siso_rate;
rate->type = LQ_VHT_SISO;
- rate->bw = rs_bw_from_sta_bw(sta);
+ rate->bw = bw;
} else if (sta->ht_cap.ht_supported &&
best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
initial_rates = rs_optimal_rates_ht;
@@ -2839,7 +2851,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
tbl = &(lq_sta->lq_info[active_tbl]);
rate = &tbl->rate;
- rs_get_initial_rate(mvm, sta, lq_sta, band, rate);
+ rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init);
rs_init_optimal_rate(mvm, sta, lq_sta);
WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a3f7c1bf3cc858..580de5851fc7f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -71,6 +71,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
struct iwl_mvm_key_pn *ptk_pn;
+ int res;
u8 tid, keyidx;
u8 pn[IEEE80211_CCMP_PN_LEN];
u8 *extiv;
@@ -127,12 +128,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
pn[4] = extiv[1];
pn[5] = extiv[0];
- if (memcmp(pn, ptk_pn->q[queue].pn[tid],
- IEEE80211_CCMP_PN_LEN) <= 0)
+ res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
+ if (res < 0)
+ return -1;
+ if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
return -1;
- if (!(stats->flag & RX_FLAG_AMSDU_MORE))
- memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+ memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
stats->flag |= RX_FLAG_PN_VALIDATED;
return 0;
@@ -314,28 +316,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
}
/*
- * returns true if a packet outside BA session is a duplicate and
- * should be dropped
+ * returns true if a packet is a duplicate and should be dropped.
+ * Updates AMSDU PN tracking info
*/
-static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
- struct ieee80211_rx_status *rx_status,
- struct ieee80211_hdr *hdr,
- struct iwl_rx_mpdu_desc *desc)
+static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_hdr *hdr,
+ struct iwl_rx_mpdu_desc *desc)
{
struct iwl_mvm_sta *mvm_sta;
struct iwl_mvm_rxq_dup_data *dup_data;
- u8 baid, tid, sub_frame_idx;
+ u8 tid, sub_frame_idx;
if (WARN_ON(IS_ERR_OR_NULL(sta)))
return false;
- baid = (le32_to_cpu(desc->reorder_data) &
- IWL_RX_MPDU_REORDER_BAID_MASK) >>
- IWL_RX_MPDU_REORDER_BAID_SHIFT;
-
- if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
- return false;
-
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
dup_data = &mvm_sta->dup_data[queue];
@@ -365,6 +360,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
dup_data->last_sub_frame[tid] >= sub_frame_idx))
return true;
+ /* Allow same PN as the first subframe for following sub frames */
+ if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
+ sub_frame_idx > dup_data->last_sub_frame[tid] &&
+ desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
+ rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
+
dup_data->last_seq[tid] = hdr->seq_ctrl;
dup_data->last_sub_frame[tid] = sub_frame_idx;
@@ -971,7 +972,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (ieee80211_is_data(hdr->frame_control))
iwl_mvm_rx_csum(sta, skb, desc);
- if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
+ if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
kfree_skb(skb);
goto out;
}
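
The rxmq.c hunks above cooperate: the duplicate detector flags later A-MSDU subframes with RX_FLAG_ALLOW_SAME_PN, and the PN check then tolerates an equal packet number only when that flag is present. A simplified, self-contained sketch of the resulting comparison (plain userspace C; the flag is reduced to a bool and the per-queue/per-TID state to a single array):

#include <stdbool.h>
#include <string.h>

#define PN_LEN 6

/* Returns 0 if the received PN is acceptable, -1 if the frame must be
 * dropped. allow_same_pn mirrors RX_FLAG_ALLOW_SAME_PN: an equal PN is
 * only valid for later subframes of the same A-MSDU. */
static int check_pn(unsigned char last_pn[PN_LEN],
		    const unsigned char rx_pn[PN_LEN], bool allow_same_pn)
{
	int res = memcmp(rx_pn, last_pn, PN_LEN);

	if (res < 0)			/* replay: PN went backwards */
		return -1;
	if (res == 0 && !allow_same_pn)	/* equal PN outside an A-MSDU */
		return -1;

	memcpy(last_pn, rx_pn, PN_LEN);	/* accept and remember the new PN */
	return 0;
}
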
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 6b2674e0260682..630e23cb0ffb55 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2039,7 +2039,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = msta->sta_id,
- .tid = IWL_MAX_TID_COUNT,
+ .tid = 0,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
@@ -2053,6 +2053,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return -ENOTSUPP;
/*
+ * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+ * invalid, so make sure we use the queue we want.
+ * Note that this is done here as we want to avoid making DQA
+ * changes in mac80211 layer.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC) {
+ vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+ mvmvif->cab_queue = vif->cab_queue;
+ }
+
+ /*
* While in previous FWs we had to exclude cab queue from TFD queue
* mask, now it is needed as any other queue.
*/
@@ -2079,24 +2090,13 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (iwl_mvm_has_new_tx_api(mvm)) {
int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
msta->sta_id,
- IWL_MAX_TID_COUNT,
+ 0,
timeout);
mvmvif->cab_queue = queue;
} else if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_STA_TYPE)) {
- /*
- * In IBSS, ieee80211_check_queues() sets the cab_queue to be
- * invalid, so make sure we use the queue we want.
- * Note that this is done here as we want to avoid making DQA
- * changes in mac80211 layer.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC) {
- vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
- mvmvif->cab_queue = vif->cab_queue;
- }
+ IWL_UCODE_TLV_API_STA_TYPE))
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
&cfg, timeout);
- }
return 0;
}
@@ -2115,7 +2115,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
- IWL_MAX_TID_COUNT, 0);
+ 0, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
@@ -3170,8 +3170,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
int ret, size;
u32 status;
+ /* This is a valid situation for GTK removal */
if (sta_id == IWL_MVM_INVALID_STA)
- return -EINVAL;
+ return 0;
key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 200ab50ec86b2f..acb217e666dbc6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -616,7 +616,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
time_cmd.repeat = 1;
time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
TE_V2_NOTIF_HOST_EVENT_END |
- T2_V2_START_IMMEDIATELY);
+ TE_V2_START_IMMEDIATELY);
if (!wait_for_notif) {
iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
@@ -803,7 +803,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
time_cmd.repeat = 1;
time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
TE_V2_NOTIF_HOST_EVENT_END |
- T2_V2_START_IMMEDIATELY);
+ TE_V2_START_IMMEDIATELY);
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
@@ -913,6 +913,8 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
time_cmd.interval = cpu_to_le32(1);
time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
TE_V2_ABSENCE);
+ if (!apply_time)
+ time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index dda77b327c9861..af6dfceab6b855 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -419,11 +419,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
u8 *crypto_hdr = skb_frag->data + hdrlen;
+ enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
u64 pn;
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_CCMP_256:
iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
break;
@@ -447,13 +447,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
+ type = TX_CMD_SEC_GCMP;
+ /* Fall through */
+ case WLAN_CIPHER_SUITE_CCMP_256:
/* TODO: Taking the key from the table might introduce a race
 * when PTK rekeying is done, having old packets with a PN
* based on the old key but the message encrypted with a new
* one.
* Need to handle this.
*/
- tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
+ tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
tx_cmd->key[0] = keyconf->hw_key_idx;
iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
break;
@@ -645,7 +648,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
info.control.vif->type == NL80211_IFTYPE_AP ||
info.control.vif->type == NL80211_IFTYPE_ADHOC) {
- sta_id = mvmvif->bcast_sta.sta_id;
+ if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ sta_id = mvmvif->bcast_sta.sta_id;
+ else
+ sta_id = mvmvif->mcast_sta.sta_id;
+
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
hdr->frame_control);
if (queue < 0)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 6d0a907d5ba58f..fabae0f6068390 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -147,7 +147,7 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
/* Sanity check on number of chunks */
num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
- if (num_tbs >= trans_pcie->max_tbs) {
+ if (num_tbs > trans_pcie->max_tbs) {
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 3f85713c41dcc9..1a566287993d5d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -378,7 +378,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
/* Sanity check on number of chunks */
num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
- if (num_tbs >= trans_pcie->max_tbs) {
+ if (num_tbs > trans_pcie->max_tbs) {
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
/* @todo issue fatal error, it is quite serious situation */
return;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6e0af815f25ec7..35b21f8152bb33 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2727,6 +2727,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
mutex_init(&data->mutex);
data->netgroup = hwsim_net_get_netgroup(net);
+ data->wmediumd = hwsim_net_get_wmediumd(net);
/* Enable frame retransmissions for lossy channels */
hw->max_rates = 4;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index f9ccd13c79f94e..e7bbbc95cdb1f6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -1125,7 +1125,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)
/* Configuration Space offset 0x70f BIT7 is used to control L0S */
tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f);
- _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7));
+ _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) |
+ ASPM_L1_LATENCY << 3);
/* Configuration Space offset 0x719 Bit3 is for L1
* BIT4 is for clock request
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c8b308cfabf11b..3653bea384705f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -527,8 +527,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
queue == card->qdio.no_in_queues - 1;
}
-
-static int qeth_issue_next_read(struct qeth_card *card)
+static int __qeth_issue_next_read(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -559,6 +558,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
return rc;
}
+static int qeth_issue_next_read(struct qeth_card *card)
+{
+ int ret;
+
+ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+ ret = __qeth_issue_next_read(card);
+ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+
+ return ret;
+}
+
static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
struct qeth_reply *reply;
@@ -960,7 +970,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_running_mask &= ~thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- wake_up(&card->wait_q);
+ wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
@@ -1164,6 +1174,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
rc = qeth_get_problem(cdev, irb);
if (rc) {
+ card->read_or_write_problem = 1;
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
goto out;
@@ -1182,7 +1193,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
return;
if (channel == &card->read &&
channel->state == CH_STATE_UP)
- qeth_issue_next_read(card);
+ __qeth_issue_next_read(card);
iob = channel->iob;
index = channel->buf_no;
@@ -5087,8 +5098,6 @@ static void qeth_core_free_card(struct qeth_card *card)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
- if (card->dev)
- free_netdev(card->dev);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
kfree(card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7f236440483f2c..5ef4c978ad199f 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -915,8 +915,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);
if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 962a04b68dd203..b6b12220da7157 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2865,8 +2865,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);
if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index e4f5bb056fd275..ba3cfa8e279b50 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2443,39 +2443,21 @@ struct cgr_comp {
struct completion completion;
};
-static int qman_delete_cgr_thread(void *p)
+static void qman_delete_cgr_smp_call(void *p)
{
- struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
- int ret;
-
- ret = qman_delete_cgr(cgr_comp->cgr);
- complete(&cgr_comp->completion);
-
- return ret;
+ qman_delete_cgr((struct qman_cgr *)p);
}
void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
- struct task_struct *thread;
- struct cgr_comp cgr_comp;
-
preempt_disable();
if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
- init_completion(&cgr_comp.completion);
- cgr_comp.cgr = cgr;
- thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
- "cgr_del");
-
- if (IS_ERR(thread))
- goto out;
-
- kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
- wake_up_process(thread);
- wait_for_completion(&cgr_comp.completion);
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_delete_cgr_smp_call, cgr, true);
preempt_enable();
return;
}
-out:
+
qman_delete_cgr(cgr);
preempt_enable();
}
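
The qman change replaces a throwaway kthread plus completion with smp_call_function_single(), which runs the deletion synchronously on the CPU that owns the congestion group and is safe to issue with preemption disabled because the wait is a busy-wait rather than a sleep. A hedged sketch of that pattern (perform_deletion() is a hypothetical stand-in for qman_delete_cgr()):

static void do_on_owner_cpu(void *data)
{
	/* runs in IPI context on the target CPU */
	perform_deletion(data);		/* hypothetical worker */
}

static void delete_safely(int owner_cpu, void *data)
{
	preempt_disable();
	if (owner_cpu != smp_processor_id())
		/* wait == true: spin until the remote CPU has run the callback */
		smp_call_function_single(owner_cpu, do_on_owner_cpu, data, true);
	else
		perform_deletion(data);
	preempt_enable();
}
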
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 610cba276d4762..8139bc70ad7dcf 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -170,7 +170,7 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
vhost_net_buf_get_size(rxq),
- __skb_array_destroy_skb);
+ tun_ptr_free);
rxq->head = rxq->tail = 0;
}
}
@@ -948,6 +948,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
n->vqs[i].done_idx = 0;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
+ n->vqs[i].rx_ring = NULL;
vhost_net_buf_init(&n->vqs[i].rxq);
}
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
@@ -972,6 +973,7 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
vhost_net_disable_vq(n, vq);
vq->private_data = NULL;
vhost_net_buf_unproduce(nvq);
+ nvq->rx_ring = NULL;
mutex_unlock(&vq->mutex);
return sock;
}
@@ -1161,14 +1163,14 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
vhost_net_disable_vq(n, vq);
vq->private_data = sock;
vhost_net_buf_unproduce(nvq);
- if (index == VHOST_NET_VQ_RX)
- nvq->rx_ring = get_tap_ptr_ring(fd);
r = vhost_vq_init_access(vq);
if (r)
goto err_used;
r = vhost_net_enable_vq(n, vq);
if (r)
goto err_used;
+ if (index == VHOST_NET_VQ_RX)
+ nvq->rx_ring = get_tap_ptr_ring(fd);
oldubufs = nvq->ubufs;
nvq->ubufs = ubufs;
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 8664db25a9a6f0..215c225b2ca17e 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -106,6 +106,7 @@ int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target,
{
return sysfs_do_create_link(kobj, target, name, 0);
}
+EXPORT_SYMBOL_GPL(sysfs_create_link_nowarn);
/**
* sysfs_delete_link - remove symlink in object's directory.
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 9f242b876fde2c..f8e76d01a5ade4 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -755,13 +755,13 @@ struct sock_cgroup_data {
* updaters and return part of the previous pointer as the prioidx or
* classid. Such races are short-lived and the result isn't critical.
*/
-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
/* fallback to 1 which is always the ID of the root cgroup */
return (skcd->is_data & 1) ? skcd->prioidx : 1;
}
-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
/* fallback to 0 which is the unconfigured default classid */
return (skcd->is_data & 1) ? skcd->classid : 0;
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index c5b0a75a781219..fd00170b494f7e 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -25,6 +25,7 @@ struct ptr_ring *tun_get_tx_ring(struct file *file);
bool tun_is_xdp_buff(void *ptr);
void *tun_xdp_to_ptr(void *ptr);
void *tun_ptr_to_xdp(void *ptr);
+void tun_ptr_free(void *ptr);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -50,5 +51,8 @@ static inline void *tun_ptr_to_xdp(void *ptr)
{
return NULL;
}
+static inline void tun_ptr_free(void *ptr)
+{
+}
#endif /* CONFIG_TUN */
#endif /* __IF_TUN_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 5e6a2d4dc36603..c4a1cff9c76861 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -300,30 +300,34 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
}
/**
- * __vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_inner_tag - inner VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
*
- * Inserts the VLAN tag into @skb as part of the payload
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
 * Returns error if skb_cow_head fails.
*
* Does not change skb->protocol so this function can be used during receive.
*/
-static inline int __vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci,
+ unsigned int mac_len)
{
struct vlan_ethhdr *veth;
if (skb_cow_head(skb, VLAN_HLEN) < 0)
return -ENOMEM;
- veth = skb_push(skb, VLAN_HLEN);
+ skb_push(skb, VLAN_HLEN);
- /* Move the mac addresses to the beginning of the new header. */
- memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
+ /* Move the mac header sans proto to the beginning of the new header. */
+ memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
skb->mac_header -= VLAN_HLEN;
+ veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
+
/* first, the ethernet type */
veth->h_vlan_proto = vlan_proto;
@@ -334,12 +338,30 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
}
/**
- * vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
+ * Returns error if skb_cow_head fails.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline int __vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
+/**
+ * vlan_insert_inner_tag - inner VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
+ *
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
* Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
*
* Following the skb_unshare() example, in case of error, the calling function
@@ -347,12 +369,14 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
*
* Does not change skb->protocol so this function can be used during receive.
*/
-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
+ __be16 vlan_proto,
+ u16 vlan_tci,
+ unsigned int mac_len)
{
int err;
- err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
+ err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
if (err) {
dev_kfree_skb_any(skb);
return NULL;
@@ -361,6 +385,26 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
}
/**
+ * vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
+/**
* vlan_insert_tag_set_proto - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
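
__vlan_insert_inner_tag() generalizes tag insertion to an arbitrary MAC header length, and __vlan_insert_tag()/vlan_insert_tag() become the mac_len == ETH_HLEN special case. A minimal userspace sketch of the offset arithmetic, assuming a buffer with VLAN_HLEN bytes of headroom in front of the frame; only mac_len - ETH_TLEN bytes move, so the innermost EtherType stays in place and becomes the encapsulated protocol:

#include <stdint.h>
#include <string.h>

#define ETH_TLEN	2	/* EtherType field */
#define VLAN_HLEN	4

/* buf points at a frame whose MAC header (including any outer VLAN tags)
 * is mac_len bytes long; VLAN_HLEN bytes of headroom exist before buf.
 * Insert a new TPID/TCI pair just before the innermost EtherType. */
static void insert_inner_tag(uint8_t *buf, unsigned int mac_len,
			     uint16_t tpid_be, uint16_t tci_be)
{
	uint8_t *new_start = buf - VLAN_HLEN;

	/* Move everything up to (but not including) the innermost EtherType. */
	memmove(new_start, buf, mac_len - ETH_TLEN);

	/* The new tag lands where the EtherType used to begin. */
	memcpy(new_start + mac_len - ETH_TLEN, &tpid_be, ETH_TLEN);
	memcpy(new_start + mac_len, &tci_be, ETH_TLEN);
}
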
diff --git a/include/linux/net.h b/include/linux/net.h
index 91216b16feb78e..2a0391eea05c36 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -222,6 +222,7 @@ enum {
int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
int sock_register(const struct net_proto_family *fam);
void sock_unregister(int family);
+bool sock_is_registered(int family);
int __sock_create(struct net *net, int family, int type, int proto,
struct socket **res, int kern);
int sock_create(int family, int type, int proto, struct socket **res);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 1313b35c3ab791..14529511c4b846 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -285,6 +285,8 @@ unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
unsigned int target, unsigned int size);
+int xt_check_proc_name(const char *name, unsigned int size);
+
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index d7069539f3519f..7c4c2379e01031 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -984,6 +984,10 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev)
{
return 0;
}
+int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
+ u16 regnum);
+int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
+ u16 regnum, u16 val);
/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
@@ -1012,7 +1016,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_state_machine(struct work_struct *work);
-void phy_change(struct phy_device *phydev);
void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index c9df2527e0cdd6..668a21f04b0966 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -766,8 +766,10 @@ slow_path:
if (!key ||
(params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }
data = rht_obj(ht, head);
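
This rhashtable.h hunk and the matching one in lib/rhashtable.c further down fix the same slow-path bug: when the key comparison rejects a node, pprev must advance to that node's next pointer, otherwise a later splice rewrites the wrong link and duplicate entries get lost. The shape of the fix on a generic singly linked list:

struct node {
	struct node *next;
	int key;
};

/* Walk the chain looking for 'key', keeping pprev pointed at the link
 * that would have to be rewritten. Skipping the pprev update on a
 * non-matching node (the original bug) leaves pprev stale. */
static struct node **find_slot(struct node **head, int key)
{
	struct node **pprev = head;
	struct node *pos;

	for (pos = *head; pos; pos = pos->next) {
		if (pos->key != key) {
			pprev = &pos->next;	/* the fix: advance on mismatch */
			continue;
		}
		break;				/* matching node found */
	}
	return pprev;
}
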
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ddf77cf4ff2dd1..99df17109e1b51 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4037,6 +4037,12 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
+/* Note: Should be called only if skb_is_gso(skb) is true */
+static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
+}
+
static inline void skb_gso_reset(struct sk_buff *skb)
{
skb_shinfo(skb)->gso_size = 0;
@@ -4044,6 +4050,22 @@ static inline void skb_gso_reset(struct sk_buff *skb)
skb_shinfo(skb)->gso_type = 0;
}
+static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
+ u16 increment)
+{
+ if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
+ return;
+ shinfo->gso_size += increment;
+}
+
+static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
+ u16 decrement)
+{
+ if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
+ return;
+ shinfo->gso_size -= decrement;
+}
+
void __skb_warn_lro_forwarding(const struct sk_buff *skb);
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
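
skb_increase_gso_size()/skb_decrease_gso_size() exist so callers cannot silently corrupt gso_size when it actually holds the GSO_BY_FRAGS marker used by SCTP; the WARN_ON_ONCE turns such an attempt into a loud no-op, and the BPF resize helpers in net/core/filter.c below are converted to them. A schematic (not standalone) caller, assuming the skb has already passed skb_is_gso():

/* Schematic caller: the skb's headers just grew by len_diff bytes, so the
 * MSS has to shrink by the same amount and gso_segs must be revalidated. */
static void adjust_mss_after_header_grow(struct sk_buff *skb, u16 len_diff)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	skb_decrease_gso_size(shinfo, len_diff);	/* no-op + warning for GSO_BY_FRAGS */
	shinfo->gso_type |= SKB_GSO_DODGY;		/* force gso_segs recomputation */
	shinfo->gso_segs = 0;
}
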
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 5bdbd9f49395f8..07ee0f84a46caa 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -90,6 +90,28 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
#endif
}
+static inline unsigned long
+u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+{
+ unsigned long flags = 0;
+
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ local_irq_save(flags);
+ write_seqcount_begin(&syncp->seq);
+#endif
+ return flags;
+}
+
+static inline void
+u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
+ unsigned long flags)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ write_seqcount_end(&syncp->seq);
+ local_irq_restore(flags);
+#endif
+}
+
static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
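
The new _irqsave/_irqrestore variants let a statistics writer that can be reached from both process and hard-IRQ context take the 32-bit seqcount safely; on 64-bit or !SMP builds everything compiles away and the returned flags are simply ignored. A hedged usage sketch (the per-CPU stats struct is an assumption; only the two helpers come from this hunk):

struct my_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};

/* Writer that may run in hard IRQ context on a 32-bit SMP kernel. */
static void my_stats_add(struct my_pcpu_stats *stats, unsigned int len)
{
	unsigned long flags;

	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}
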
diff --git a/include/net/ip.h b/include/net/ip.h
index 746abff9ce517e..f49b3a576becfb 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -328,6 +328,13 @@ int ip_decrease_ttl(struct iphdr *iph)
return --iph->ttl;
}
+static inline int ip_mtu_locked(const struct dst_entry *dst)
+{
+ const struct rtable *rt = (const struct rtable *)dst;
+
+ return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
+}
+
static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
@@ -335,7 +342,7 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
return pmtudisc == IP_PMTUDISC_DO ||
(pmtudisc == IP_PMTUDISC_WANT &&
- !(dst_metric_locked(dst, RTAX_MTU)));
+ !ip_mtu_locked(dst));
}
static inline bool ip_sk_accept_pmtu(const struct sock *sk)
@@ -361,7 +368,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
struct net *net = dev_net(dst->dev);
if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
- dst_metric_locked(dst, RTAX_MTU) ||
+ ip_mtu_locked(dst) ||
!forwarding)
return dst_mtu(dst);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 27d23a65f3cd0b..ac0866bb9e9361 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -179,6 +179,9 @@ void rt6_disable_ip(struct net_device *dev, unsigned long event);
void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
void rt6_multipath_rebalance(struct rt6_info *rt);
+void rt6_uncached_list_add(struct rt6_info *rt);
+void rt6_uncached_list_del(struct rt6_info *rt);
+
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
const struct dst_entry *dst = skb_dst(skb);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index f80524396c062d..77d0a78cf7d2b8 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -59,6 +59,7 @@ struct fib_nh_exception {
int fnhe_genid;
__be32 fnhe_daddr;
u32 fnhe_pmtu;
+ bool fnhe_mtu_locked;
__be32 fnhe_gw;
unsigned long fnhe_expires;
struct rtable __rcu *fnhe_rth_input;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index c96511fa9198e9..2b581bd938120a 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -2063,6 +2063,9 @@ struct ieee80211_txq {
* @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
* TDLS links.
*
+ * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
+ * support QoS NDP for AP probing - that's most likely a driver bug.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2106,6 +2109,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_REPORTS_LOW_ACK,
IEEE80211_HW_SUPPORTS_TX_FRAG,
IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
+ IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
diff --git a/include/net/route.h b/include/net/route.h
index 1eb9ce470e25ce..20a92ca9e11582 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -63,7 +63,8 @@ struct rtable {
__be32 rt_gateway;
/* Miscellaneous cached information */
- u32 rt_pmtu;
+ u32 rt_mtu_locked:1,
+ rt_pmtu:31;
u32 rt_table_id;
@@ -227,6 +228,9 @@ struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
+void rt_add_uncached_list(struct rtable *rt);
+void rt_del_uncached_list(struct rtable *rt);
+
static inline void ip_rt_put(struct rtable *rt)
{
/* dst_release() accepts a NULL parameter.
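
Splitting rt_pmtu into a 1-bit rt_mtu_locked flag plus a 31-bit PMTU keeps struct rtable the same size while letting the IPv4 code remember that a too-small ICMP PMTU was clamped to the minimum and must not be lowered again later (see ip_mtu_locked() in the ip.h hunk above). A tiny runnable illustration of the packing:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the struct rtable change: one u32 carries a 1-bit "MTU locked"
 * flag and a 31-bit PMTU, so the fix costs no extra space. */
struct toy_route {
	uint32_t mtu_locked:1,
		 pmtu:31;
};

int main(void)
{
	struct toy_route rt = { .mtu_locked = 1, .pmtu = 552 };

	/* sizeof(rt) is typically still 4 bytes */
	printf("locked=%u pmtu=%u size=%zu\n",
	       rt.mtu_locked, rt.pmtu, sizeof(rt));
	return 0;
}
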
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e2ab13687fb974..2092d33194dd1f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -824,6 +824,16 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
*to_free = skb;
}
+static inline void __qdisc_drop_all(struct sk_buff *skb,
+ struct sk_buff **to_free)
+{
+ if (skb->prev)
+ skb->prev->next = *to_free;
+ else
+ skb->next = *to_free;
+ *to_free = skb;
+}
+
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct qdisc_skb_head *qh,
struct sk_buff **to_free)
@@ -956,6 +966,15 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_DROP;
}
+static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ __qdisc_drop_all(skb, to_free);
+ qdisc_qstats_drop(sch);
+
+ return NET_XMIT_DROP;
+}
+
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
long it will take to send a packet given its size.
*/
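
__qdisc_drop_all()/qdisc_drop_all() chain a whole segment list onto *to_free by following skb->prev/next, which is what netem needs to drop every segment of a GSO burst at once instead of leaking the tail. A schematic enqueue path (the limit check is an assumption; only qdisc_drop_all() itself comes from this hunk):

/* Schematic netem-style enqueue: if the queue cannot accept this packet,
 * drop it together with any segments still linked behind it. */
static int toy_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop_all(skb, sch, to_free);	/* drops the whole list */

	/* ... normal enqueue path ... */
	return NET_XMIT_SUCCESS;
}
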
diff --git a/include/net/sock.h b/include/net/sock.h
index 169c92afcafa3d..ae23f3b389cac6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1137,6 +1137,7 @@ struct proto {
int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
+int sock_load_diag_module(int family, int protocol);
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 8bbbcb5cd94b44..820de5d222d22b 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -30,6 +30,7 @@
*/
#define ETH_ALEN 6 /* Octets in one ethernet addr */
+#define ETH_TLEN 2 /* Octets in ethernet type field */
#define ETH_HLEN 14 /* Total octets in header. */
#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
#define ETH_DATA_LEN 1500 /* Max. octets in payload */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e24aa3241387de..43f95d190eeaf8 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1845,7 +1845,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
union bpf_attr attr = {};
int err;
- if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
+ if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
return -EPERM;
err = check_uarg_tail_zero(uattr, sizeof(attr), size);
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index 21b0122cb39cb1..1d5632d8bbccfd 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -14,6 +14,15 @@
static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
+static void fei_post_handler(struct kprobe *kp, struct pt_regs *regs,
+ unsigned long flags)
+{
+ /*
+ * A dummy post handler is required to prohibit optimizing, because
+ * jump optimization does not support execution path overriding.
+ */
+}
+
struct fei_attr {
struct list_head list;
struct kprobe kp;
@@ -56,6 +65,7 @@ static struct fei_attr *fei_attr_new(const char *sym, unsigned long addr)
return NULL;
}
attr->kp.pre_handler = fei_kprobe_handler;
+ attr->kp.post_handler = fei_post_handler;
attr->retval = adjust_error_retval(addr, 0);
INIT_LIST_HEAD(&attr->list);
}
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index c0a9e310d71501..01e6b3a38871e2 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -661,7 +661,41 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
.arg3_type = ARG_ANYTHING,
};
-BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx,
+static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+{
+ switch (func_id) {
+ case BPF_FUNC_perf_event_output:
+ return &bpf_perf_event_output_proto_tp;
+ case BPF_FUNC_get_stackid:
+ return &bpf_get_stackid_proto_tp;
+ default:
+ return tracing_func_proto(func_id);
+ }
+}
+
+static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+
+ BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
+ return true;
+}
+
+const struct bpf_verifier_ops tracepoint_verifier_ops = {
+ .get_func_proto = tp_prog_func_proto,
+ .is_valid_access = tp_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops tracepoint_prog_ops = {
+};
+
+BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
struct bpf_perf_event_value *, buf, u32, size)
{
int err = -EINVAL;
@@ -678,8 +712,8 @@ clear:
return err;
}
-static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
- .func = bpf_perf_prog_read_value_tp,
+static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
+ .func = bpf_perf_prog_read_value,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
@@ -687,7 +721,7 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
.arg3_type = ARG_CONST_SIZE,
};
-static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+static const struct bpf_func_proto *pe_prog_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
case BPF_FUNC_perf_event_output:
@@ -695,34 +729,12 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_get_stackid:
return &bpf_get_stackid_proto_tp;
case BPF_FUNC_perf_prog_read_value:
- return &bpf_perf_prog_read_value_proto_tp;
+ return &bpf_perf_prog_read_value_proto;
default:
return tracing_func_proto(func_id);
}
}
-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
- struct bpf_insn_access_aux *info)
-{
- if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
- return false;
- if (type != BPF_READ)
- return false;
- if (off % size != 0)
- return false;
-
- BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
- return true;
-}
-
-const struct bpf_verifier_ops tracepoint_verifier_ops = {
- .get_func_proto = tp_prog_func_proto,
- .is_valid_access = tp_prog_is_valid_access,
-};
-
-const struct bpf_prog_ops tracepoint_prog_ops = {
-};
-
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
@@ -779,7 +791,7 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
}
const struct bpf_verifier_ops perf_event_verifier_ops = {
- .get_func_proto = tp_prog_func_proto,
+ .get_func_proto = pe_prog_func_proto,
.is_valid_access = pe_prog_is_valid_access,
.convert_ctx_access = pe_prog_convert_ctx_access,
};
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 3825c30aaa3698..47de025b624520 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -506,8 +506,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
if (!key ||
(ht->p.obj_cmpfn ?
ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }
if (!ht->rhlist)
return rht_obj(ht, head);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 2efb213716faaa..3e9335493fe493 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5467,7 +5467,7 @@ static struct bpf_test tests[] = {
{
"BPF_MAXINSNS: Jump, gap, jump, ...",
{ },
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
#else
CLASSIC | FLAG_NO_DATA,
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 76d3667fdea21c..f4000c137dbed6 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -79,6 +79,21 @@ struct thread_data {
struct test_obj *objs;
};
+static u32 my_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct test_obj_rhl *obj = data;
+
+ return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+}
+
+static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
+{
+ const struct test_obj_rhl *test_obj = obj;
+ const struct test_obj_val *val = arg->key;
+
+ return test_obj->value.id - val->id;
+}
+
static struct rhashtable_params test_rht_params = {
.head_offset = offsetof(struct test_obj, node),
.key_offset = offsetof(struct test_obj, value),
@@ -87,6 +102,17 @@ static struct rhashtable_params test_rht_params = {
.nulls_base = (3U << RHT_BASE_SHIFT),
};
+static struct rhashtable_params test_rht_params_dup = {
+ .head_offset = offsetof(struct test_obj_rhl, list_node),
+ .key_offset = offsetof(struct test_obj_rhl, value),
+ .key_len = sizeof(struct test_obj_val),
+ .hashfn = jhash,
+ .obj_hashfn = my_hashfn,
+ .obj_cmpfn = my_cmpfn,
+ .nelem_hint = 128,
+ .automatic_shrinking = false,
+};
+
static struct semaphore prestart_sem;
static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
@@ -465,6 +491,112 @@ static int __init test_rhashtable_max(struct test_obj *array,
return err;
}
+static unsigned int __init print_ht(struct rhltable *rhlt)
+{
+ struct rhashtable *ht;
+ const struct bucket_table *tbl;
+ char buff[512] = "";
+ unsigned int i, cnt = 0;
+
+ ht = &rhlt->ht;
+ tbl = rht_dereference(ht->tbl, ht);
+ for (i = 0; i < tbl->size; i++) {
+ struct rhash_head *pos, *next;
+ struct test_obj_rhl *p;
+
+ pos = rht_dereference(tbl->buckets[i], ht);
+ next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
+
+ if (!rht_is_a_nulls(pos)) {
+ sprintf(buff, "%s\nbucket[%d] -> ", buff, i);
+ }
+
+ while (!rht_is_a_nulls(pos)) {
+ struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
+ sprintf(buff, "%s[[", buff);
+ do {
+ pos = &list->rhead;
+ list = rht_dereference(list->next, ht);
+ p = rht_obj(ht, pos);
+
+ sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid,
+ list? ", " : " ");
+ cnt++;
+ } while (list);
+
+ pos = next,
+ next = !rht_is_a_nulls(pos) ?
+ rht_dereference(pos->next, ht) : NULL;
+
+ sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : "");
+ }
+ }
+ printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+
+ return cnt;
+}
+
+static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
+ int cnt, bool slow)
+{
+ struct rhltable rhlt;
+ unsigned int i, ret;
+ const char *key;
+ int err = 0;
+
+ err = rhltable_init(&rhlt, &test_rht_params_dup);
+ if (WARN_ON(err))
+ return err;
+
+ for (i = 0; i < cnt; i++) {
+ rhl_test_objects[i].value.tid = i;
+ key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
+ key += test_rht_params_dup.key_offset;
+
+ if (slow) {
+ err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
+ &rhl_test_objects[i].list_node.rhead));
+ if (err == -EAGAIN)
+ err = 0;
+ } else
+ err = rhltable_insert(&rhlt,
+ &rhl_test_objects[i].list_node,
+ test_rht_params_dup);
+ if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
+ goto skip_print;
+ }
+
+ ret = print_ht(&rhlt);
+ WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
+
+skip_print:
+ rhltable_destroy(&rhlt);
+
+ return 0;
+}
+
+static int __init test_insert_duplicates_run(void)
+{
+ struct test_obj_rhl rhl_test_objects[3] = {};
+
+ pr_info("test inserting duplicates\n");
+
+ /* two different values that map to same bucket */
+ rhl_test_objects[0].value.id = 1;
+ rhl_test_objects[1].value.id = 21;
+
+	/* and another duplicate with the same value as [0],
+ * which will be second on the bucket list */
+ rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;
+
+ test_insert_dup(rhl_test_objects, 2, false);
+ test_insert_dup(rhl_test_objects, 3, false);
+ test_insert_dup(rhl_test_objects, 2, true);
+ test_insert_dup(rhl_test_objects, 3, true);
+
+ return 0;
+}
+
static int thread_lookup_test(struct thread_data *tdata)
{
unsigned int entries = tdata->entries;
@@ -613,6 +745,8 @@ static int __init test_rht_init(void)
do_div(total_time, runs);
pr_info("Average test time: %llu\n", total_time);
+ test_insert_duplicates_run();
+
if (!tcount)
return 0;
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 64aa9f755e1d25..45c9bf5ff3a0c1 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -48,8 +48,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
* original position later
*/
skb_push(skb, offset);
- skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
- skb->vlan_tci);
+ skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
+ skb->vlan_tci, skb->mac_len);
if (!skb)
return false;
skb_pull(skb, offset + VLAN_HLEN);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 9703c791ffc5ac..87cd962d28d58a 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -393,7 +393,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
batadv_arp_hw_src(skb, hdr_size), &ip_src,
batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
- if (hdr_size == 0)
+ if (hdr_size < sizeof(struct batadv_unicast_packet))
return;
unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index e91f29c7c638a5..5daa3d50da1772 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -24,6 +24,7 @@
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
+#include <linux/eventpoll.h>
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index dc9fa37ddd1414..cdbe0e5e208b8b 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -22,6 +22,7 @@
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/errno.h>
+#include <linux/eventpoll.h>
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index cbdeb47ec3f606..d70640135e3ab2 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -543,8 +543,8 @@ update:
bat_priv->mcast.enabled = true;
}
- return !(mcast_data.flags &
- (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6));
+ return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 &&
+ mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6);
}
/**
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index b6891e8b741c42..e61dc1293bb5aa 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -759,6 +759,7 @@ free_skb:
/**
* batadv_reroute_unicast_packet() - update the unicast header for re-routing
* @bat_priv: the bat priv with all the soft interface information
+ * @skb: unicast packet to process
* @unicast_packet: the unicast header to be updated
* @dst_addr: the payload destination
* @vid: VLAN identifier
@@ -770,7 +771,7 @@ free_skb:
* Return: true if the packet header has been updated, false otherwise
*/
static bool
-batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
+batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_unicast_packet *unicast_packet,
u8 *dst_addr, unsigned short vid)
{
@@ -799,8 +800,10 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
}
/* update the packet header */
+ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
ether_addr_copy(unicast_packet->dest, orig_addr);
unicast_packet->ttvn = orig_ttvn;
+ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
ret = true;
out:
@@ -841,7 +844,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
* the packet to
*/
if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
- if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
ethhdr->h_dest, vid))
batadv_dbg_ratelimited(BATADV_DBG_TT,
bat_priv,
@@ -887,7 +890,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
* destination can possibly be updated and forwarded towards the new
* target host
*/
- if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
ethhdr->h_dest, vid)) {
batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
"Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
@@ -910,12 +913,14 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
if (!primary_if)
return false;
+ /* update the packet header */
+ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
+ unicast_packet->ttvn = curr_ttvn;
+ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
batadv_hardif_put(primary_if);
- unicast_packet->ttvn = curr_ttvn;
-
return true;
}
@@ -968,14 +973,10 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL;
int check, hdr_size = sizeof(*unicast_packet);
enum batadv_subtype subtype;
- struct ethhdr *ethhdr;
int ret = NET_RX_DROP;
bool is4addr, is_gw;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
- ethhdr = eth_hdr(skb);
-
is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
/* the caller function should have already pulled 2 bytes */
if (is4addr)
@@ -995,12 +996,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
goto free_skb;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
/* packet for me */
if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
		/* If this is a unicast packet from another backbone gw,
* drop it.
*/
- orig_addr_gw = ethhdr->h_source;
+ orig_addr_gw = eth_hdr(skb)->h_source;
orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw);
if (orig_node_gw) {
is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw,
@@ -1015,6 +1018,8 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
}
if (is4addr) {
+ unicast_4addr_packet =
+ (struct batadv_unicast_4addr_packet *)skb->data;
subtype = unicast_4addr_packet->subtype;
batadv_dat_inc_counter(bat_priv, subtype);
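
Because the unicast header lies inside the area covered by a CHECKSUM_COMPLETE skb->csum, the routing.c hunks bracket every in-place header rewrite with skb_postpull_rcsum()/skb_postpush_rcsum(). The general pattern, sketched for an arbitrary header edit:

/* Schematic in-place rewrite that keeps a CHECKSUM_COMPLETE skb->csum valid.
 * 'hdr' points into skb->data and 'hdr_len' covers every byte that may be
 * modified; len <= hdr_len. */
static void rewrite_bytes(struct sk_buff *skb, u8 *hdr, unsigned int hdr_len,
			  const u8 *new_bytes, unsigned int len)
{
	skb_postpull_rcsum(skb, hdr, hdr_len);	/* subtract old contents from skb->csum */
	memcpy(hdr, new_bytes, len);		/* mutate bytes within the covered range */
	skb_postpush_rcsum(skb, hdr, hdr_len);	/* fold the new contents back in */
}
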
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 01117ae84f1d3f..a2ddae2f37d7a2 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2296,8 +2296,14 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
else
sec_level = authreq_to_seclevel(auth);
- if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
+ if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) {
+ /* If link is already encrypted with sufficient security we
+		 * still need to refresh encryption as per Core Spec 5.0 Vol 3,
+ * Part H 2.4.6
+ */
+ smp_ltk_encrypt(conn, hcon->sec_level);
return 0;
+ }
if (sec_level > hcon->pending_sec_level)
hcon->pending_sec_level = sec_level;
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index c5afb4232ecb4f..620e54f082965d 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -177,6 +177,28 @@ static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
}
+static bool wormhash_offset_invalid(int off, unsigned int len)
+{
+ if (off == 0) /* not present */
+ return false;
+
+ if (off < (int)sizeof(struct ebt_among_info) ||
+ off % __alignof__(struct ebt_mac_wormhash))
+ return true;
+
+ off += sizeof(struct ebt_mac_wormhash);
+
+ return off > len;
+}
+
+static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b)
+{
+ if (a == 0)
+ a = sizeof(struct ebt_among_info);
+
+ return ebt_mac_wormhash_size(wh) + a == b;
+}
+
static int ebt_among_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_among_info *info = par->matchinfo;
@@ -189,6 +211,10 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
if (expected_length > em->match_size)
return -EINVAL;
+ if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) ||
+ wormhash_offset_invalid(info->wh_src_ofs, em->match_size))
+ return -EINVAL;
+
wh_dst = ebt_among_wh_dst(info);
if (poolsize_invalid(wh_dst))
return -EINVAL;
@@ -201,6 +227,14 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
if (poolsize_invalid(wh_src))
return -EINVAL;
+ if (info->wh_src_ofs < info->wh_dst_ofs) {
+ if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs))
+ return -EINVAL;
+ } else {
+ if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs))
+ return -EINVAL;
+ }
+
expected_length += ebt_mac_wormhash_size(wh_src);
if (em->match_size != EBT_ALIGN(expected_length)) {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 254ef9f4956790..a94d23b0a9af30 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2119,8 +2119,12 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
* offsets are relative to beginning of struct ebt_entry (i.e., 0).
*/
for (i = 0; i < 4 ; ++i) {
- if (offsets[i] >= *total)
+ if (offsets[i] > *total)
return -EINVAL;
+
+ if (i < 3 && offsets[i] == *total)
+ return -EINVAL;
+
if (i == 0)
continue;
if (offsets[i-1] > offsets[i])
diff --git a/net/core/dev.c b/net/core/dev.c
index 2cedf520cb28f0..12be205357146f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3278,15 +3278,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
- struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+ const struct netprio_map *map;
+ const struct sock *sk;
+ unsigned int prioidx;
- if (!skb->priority && skb->sk && map) {
- unsigned int prioidx =
- sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
+ if (skb->priority)
+ return;
+ map = rcu_dereference_bh(skb->dev->priomap);
+ if (!map)
+ return;
+ sk = skb_to_full_sk(skb);
+ if (!sk)
+ return;
- if (prioidx < map->priomap_len)
- skb->priority = map->priomap[prioidx];
- }
+ prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
+
+ if (prioidx < map->priomap_len)
+ skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 0ab1af04296cbf..a04e1e88bf3ab4 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -402,8 +402,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
if (colon)
*colon = 0;
- dev_load(net, ifr->ifr_name);
-
/*
* See which interface the caller is talking about.
*/
@@ -423,6 +421,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
case SIOCGIFMAP:
case SIOCGIFINDEX:
case SIOCGIFTXQLEN:
+ dev_load(net, ifr->ifr_name);
rcu_read_lock();
ret = dev_ifsioc_locked(net, ifr, cmd);
rcu_read_unlock();
@@ -431,6 +430,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
return ret;
case SIOCETHTOOL:
+ dev_load(net, ifr->ifr_name);
rtnl_lock();
ret = dev_ethtool(net, ifr);
rtnl_unlock();
@@ -447,6 +447,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSIFNAME:
+ dev_load(net, ifr->ifr_name);
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
rtnl_lock();
@@ -494,6 +495,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
/* fall through */
case SIOCBONDSLAVEINFOQUERY:
case SIOCBONDINFOQUERY:
+ dev_load(net, ifr->ifr_name);
rtnl_lock();
ret = dev_ifsioc(net, ifr, cmd);
rtnl_unlock();
@@ -518,6 +520,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
cmd == SIOCGHWTSTAMP ||
(cmd >= SIOCDEVPRIVATE &&
cmd <= SIOCDEVPRIVATE + 15)) {
+ dev_load(net, ifr->ifr_name);
rtnl_lock();
ret = dev_ifsioc(net, ifr, cmd);
rtnl_unlock();
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 2f2307d94787c4..effd4848c2b4be 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1798,7 +1798,7 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
@@ -1807,7 +1807,6 @@ send_done:
nla_put_failure:
err = -EMSGSIZE;
err_table_put:
-err_skb_send_alloc:
genlmsg_cancel(skb, hdr);
nlmsg_free(skb);
return err;
@@ -2073,7 +2072,7 @@ static int devlink_dpipe_entries_fill(struct genl_info *info,
table->counters_enabled,
&dump_ctx);
if (err)
- goto err_entries_dump;
+ return err;
send_done:
nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
@@ -2081,16 +2080,10 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
return genlmsg_reply(dump_ctx.skb, info);
-
-err_entries_dump:
-err_skb_send_alloc:
- genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr);
- nlmsg_free(dump_ctx.skb);
- return err;
}
static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
@@ -2229,7 +2222,7 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
return genlmsg_reply(skb, info);
@@ -2237,7 +2230,6 @@ send_done:
nla_put_failure:
err = -EMSGSIZE;
err_table_put:
-err_skb_send_alloc:
genlmsg_cancel(skb, hdr);
nlmsg_free(skb);
return err;
diff --git a/net/core/filter.c b/net/core/filter.c
index 0c121adbdbaaac..48aa7c7320db5f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2087,6 +2087,10 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+ if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ return -ENOTSUPP;
+
ret = skb_cow(skb, len_diff);
if (unlikely(ret < 0))
return ret;
@@ -2096,19 +2100,21 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
return ret;
if (skb_is_gso(skb)) {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
/* SKB_GSO_TCPV4 needs to be changed into
* SKB_GSO_TCPV6.
*/
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
- skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4;
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+ if (shinfo->gso_type & SKB_GSO_TCPV4) {
+ shinfo->gso_type &= ~SKB_GSO_TCPV4;
+ shinfo->gso_type |= SKB_GSO_TCPV6;
}
/* Due to IPv6 header, MSS needs to be downgraded. */
- skb_shinfo(skb)->gso_size -= len_diff;
+ skb_decrease_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_segs = 0;
}
skb->protocol = htons(ETH_P_IPV6);
@@ -2123,6 +2129,10 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+ if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ return -ENOTSUPP;
+
ret = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(ret < 0))
return ret;
@@ -2132,19 +2142,21 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
return ret;
if (skb_is_gso(skb)) {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
/* SKB_GSO_TCPV6 needs to be changed into
* SKB_GSO_TCPV4.
*/
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
- skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6;
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+ if (shinfo->gso_type & SKB_GSO_TCPV6) {
+ shinfo->gso_type &= ~SKB_GSO_TCPV6;
+ shinfo->gso_type |= SKB_GSO_TCPV4;
}
/* Due to IPv4 header, MSS can be upgraded. */
- skb_shinfo(skb)->gso_size += len_diff;
+ skb_increase_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_segs = 0;
}
skb->protocol = htons(ETH_P_IP);
@@ -2243,6 +2255,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+ if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ return -ENOTSUPP;
+
ret = skb_cow(skb, len_diff);
if (unlikely(ret < 0))
return ret;
@@ -2252,11 +2268,13 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
return ret;
if (skb_is_gso(skb)) {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
/* Due to header grow, MSS needs to be downgraded. */
- skb_shinfo(skb)->gso_size -= len_diff;
+ skb_decrease_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_segs = 0;
}
return 0;
@@ -2267,6 +2285,10 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+ if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ return -ENOTSUPP;
+
ret = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(ret < 0))
return ret;
@@ -2276,11 +2298,13 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
return ret;
if (skb_is_gso(skb)) {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
/* Due to header shrink, MSS can be upgraded. */
- skb_shinfo(skb)->gso_size += len_diff;
+ skb_increase_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_segs = 0;
}
return 0;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0bb0d88779544b..1e7acdc30732ef 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4179,7 +4179,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk);
+ sk->sk_error_report(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
@@ -4904,7 +4904,7 @@ static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
thlen += inner_tcp_hdrlen(skb);
} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
thlen = tcp_hdrlen(skb);
- } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) {
+ } else if (unlikely(skb_is_gso_sctp(skb))) {
thlen = sizeof(struct sctphdr);
}
/* UFO sets gso_size to the size of the fragmentation
@@ -5020,13 +5020,16 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
+ int mac_len;
+
if (skb_cow(skb, skb_headroom(skb)) < 0) {
kfree_skb(skb);
return NULL;
}
- memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
- 2 * ETH_ALEN);
+ mac_len = skb->data - skb_mac_header(skb);
+ memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
+ mac_len - VLAN_HLEN - ETH_TLEN);
skb->mac_header += VLAN_HLEN;
return skb;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index c501499a04fe97..85b0b64e7f9dd5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3261,6 +3261,27 @@ void proto_unregister(struct proto *prot)
}
EXPORT_SYMBOL(proto_unregister);
+int sock_load_diag_module(int family, int protocol)
+{
+ if (!protocol) {
+ if (!sock_is_registered(family))
+ return -ENOENT;
+
+ return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+ NETLINK_SOCK_DIAG, family);
+ }
+
+#ifdef CONFIG_INET
+ if (family == AF_INET &&
+ !rcu_access_pointer(inet_protos[protocol]))
+ return -ENOENT;
+#endif
+
+ return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
+ NETLINK_SOCK_DIAG, family, protocol);
+}
+EXPORT_SYMBOL(sock_load_diag_module);
+
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(proto_list_mutex)
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 146b50e30659da..c37b5be7c5e4f0 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -220,8 +220,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
if (sock_diag_handlers[req->sdiag_family] == NULL)
- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, req->sdiag_family);
+ sock_load_diag_module(req->sdiag_family, 0);
mutex_lock(&sock_diag_table_mutex);
hndl = sock_diag_handlers[req->sdiag_family];
@@ -247,8 +246,7 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
case TCPDIAG_GETSOCK:
case DCCPDIAG_GETSOCK:
if (inet_rcv_compat == NULL)
- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, AF_INET);
+ sock_load_diag_module(AF_INET, 0);
mutex_lock(&sock_diag_table_mutex);
if (inet_rcv_compat != NULL)
@@ -281,14 +279,12 @@ static int sock_diag_bind(struct net *net, int group)
case SKNLGRP_INET_TCP_DESTROY:
case SKNLGRP_INET_UDP_DESTROY:
if (!sock_diag_handlers[AF_INET])
- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, AF_INET);
+ sock_load_diag_module(AF_INET, 0);
break;
case SKNLGRP_INET6_TCP_DESTROY:
case SKNLGRP_INET6_UDP_DESTROY:
if (!sock_diag_handlers[AF_INET6])
- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, AF_INET6);
+ sock_load_diag_module(AF_INET6, 0);
break;
}
return 0;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 15bdc002d90c0f..84cd4e3fd01b1d 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -794,6 +794,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (skb == NULL)
goto out_release;
+ if (sk->sk_state == DCCP_CLOSED) {
+ rc = -ENOTCONN;
+ goto out_discard;
+ }
+
skb_reserve(skb, sk->sk_prot->max_header);
rc = memcpy_from_msg(skb_put(skb, len), msg, len);
if (rc != 0)
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index cb54b81d0bd9e6..42a7b85b84e1f6 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -194,7 +194,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds,
ds->ports[i].dn = cd->port_dn[i];
ds->ports[i].cpu_dp = dst->cpu_dp;
- if (dsa_is_user_port(ds, i))
+ if (!dsa_is_user_port(ds, i))
continue;
ret = dsa_slave_create(&ds->ports[i]);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 974765b7d92a75..e9f0489e422944 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -206,9 +206,13 @@ static inline void lowpan_netlink_fini(void)
static int lowpan_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct net_device *wdev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct wpan_dev *wpan_dev;
- if (wdev->type != ARPHRD_IEEE802154)
+ if (ndev->type != ARPHRD_IEEE802154)
+ return NOTIFY_DONE;
+ wpan_dev = ndev->ieee802154_ptr;
+ if (!wpan_dev)
return NOTIFY_DONE;
switch (event) {
@@ -217,8 +221,8 @@ static int lowpan_device_event(struct notifier_block *unused,
* also delete possible lowpan interfaces which belongs
* to the wpan interface.
*/
- if (wdev->ieee802154_ptr->lowpan_dev)
- lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL);
+ if (wpan_dev->lowpan_dev)
+ lowpan_dellink(wpan_dev->lowpan_dev, NULL);
break;
default:
return NOTIFY_DONE;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index a383f299ce246b..4e5bc4b2f14e67 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -53,8 +53,7 @@ static DEFINE_MUTEX(inet_diag_table_mutex);
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
if (!inet_diag_table[proto])
- request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, AF_INET, proto);
+ sock_load_diag_module(AF_INET, proto);
mutex_lock(&inet_diag_table_mutex);
if (!inet_diag_table[proto])
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 26a3d0315728ed..e8ec28999f5ce0 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -119,6 +119,9 @@ out:
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
+ if (!hlist_unhashed(&q->list_evictor))
+ return false;
+
return q->net->low_thresh == 0 ||
frag_mem_limit(q->net) >= q->net->low_thresh;
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 9c41a0cef1a510..74c962b9b09c3c 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -258,7 +258,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
return -EINVAL;
- ipc->oif = src_info->ipi6_ifindex;
+ if (src_info->ipi6_ifindex)
+ ipc->oif = src_info->ipi6_ifindex;
ipc->addr = src_info->ipi6_addr.s6_addr32[3];
continue;
}
@@ -288,7 +289,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
return -EINVAL;
info = (struct in_pktinfo *)CMSG_DATA(cmsg);
- ipc->oif = info->ipi_ifindex;
+ if (info->ipi_ifindex)
+ ipc->oif = info->ipi_ifindex;
ipc->addr = info->ipi_spec_dst.s_addr;
break;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 860b3fd2f54b13..299e247b20326d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -634,6 +634,7 @@ static inline u32 fnhe_hashfun(__be32 daddr)
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
rt->rt_pmtu = fnhe->fnhe_pmtu;
+ rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
rt->dst.expires = fnhe->fnhe_expires;
if (fnhe->fnhe_gw) {
@@ -644,7 +645,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
}
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
- u32 pmtu, unsigned long expires)
+ u32 pmtu, bool lock, unsigned long expires)
{
struct fnhe_hash_bucket *hash;
struct fib_nh_exception *fnhe;
@@ -681,8 +682,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
fnhe->fnhe_genid = genid;
if (gw)
fnhe->fnhe_gw = gw;
- if (pmtu)
+ if (pmtu) {
fnhe->fnhe_pmtu = pmtu;
+ fnhe->fnhe_mtu_locked = lock;
+ }
fnhe->fnhe_expires = max(1UL, expires);
/* Update all cached dsts too */
rt = rcu_dereference(fnhe->fnhe_rth_input);
@@ -706,6 +709,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
fnhe->fnhe_daddr = daddr;
fnhe->fnhe_gw = gw;
fnhe->fnhe_pmtu = pmtu;
+ fnhe->fnhe_mtu_locked = lock;
fnhe->fnhe_expires = expires;
/* Exception created; mark the cached routes for the nexthop
@@ -787,7 +791,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
struct fib_nh *nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, new_gw,
- 0, jiffies + ip_rt_gc_timeout);
+ 0, false,
+ jiffies + ip_rt_gc_timeout);
}
if (kill_route)
rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -1009,15 +1014,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
struct fib_result res;
+ bool lock = false;
- if (dst_metric_locked(dst, RTAX_MTU))
+ if (ip_mtu_locked(dst))
return;
if (ipv4_mtu(dst) < mtu)
return;
- if (mtu < ip_rt_min_pmtu)
+ if (mtu < ip_rt_min_pmtu) {
+ lock = true;
mtu = ip_rt_min_pmtu;
+ }
if (rt->rt_pmtu == mtu &&
time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
@@ -1027,7 +1035,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
struct fib_nh *nh = &FIB_RES_NH(res);
- update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
+ update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
jiffies + ip_rt_mtu_expires);
}
rcu_read_unlock();
@@ -1280,7 +1288,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = READ_ONCE(dst->dev->mtu);
- if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
+ if (unlikely(ip_mtu_locked(dst))) {
if (rt->rt_uses_gateway && mtu > 576)
mtu = 576;
}
@@ -1393,7 +1401,7 @@ struct uncached_list {
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
-static void rt_add_uncached_list(struct rtable *rt)
+void rt_add_uncached_list(struct rtable *rt)
{
struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
@@ -1404,14 +1412,8 @@ static void rt_add_uncached_list(struct rtable *rt)
spin_unlock_bh(&ul->lock);
}
-static void ipv4_dst_destroy(struct dst_entry *dst)
+void rt_del_uncached_list(struct rtable *rt)
{
- struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
- struct rtable *rt = (struct rtable *) dst;
-
- if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
- kfree(p);
-
if (!list_empty(&rt->rt_uncached)) {
struct uncached_list *ul = rt->rt_uncached_list;
@@ -1421,6 +1423,17 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
}
}
+static void ipv4_dst_destroy(struct dst_entry *dst)
+{
+ struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
+ struct rtable *rt = (struct rtable *)dst;
+
+ if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+ kfree(p);
+
+ rt_del_uncached_list(rt);
+}
+
void rt_flush_dev(struct net_device *dev)
{
struct net *net = dev_net(dev);
@@ -1516,6 +1529,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
rt->rt_is_input = 0;
rt->rt_iif = 0;
rt->rt_pmtu = 0;
+ rt->rt_mtu_locked = 0;
rt->rt_gateway = 0;
rt->rt_uses_gateway = 0;
rt->rt_table_id = 0;
@@ -2541,6 +2555,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_is_input = ort->rt_is_input;
rt->rt_iif = ort->rt_iif;
rt->rt_pmtu = ort->rt_pmtu;
+ rt->rt_mtu_locked = ort->rt_mtu_locked;
rt->rt_genid = rt_genid_ipv4(net);
rt->rt_flags = ort->rt_flags;
@@ -2643,6 +2658,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
if (rt->rt_pmtu && expires)
metrics[RTAX_MTU - 1] = rt->rt_pmtu;
+ if (rt->rt_mtu_locked && expires)
+ metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
if (rtnetlink_put_metrics(skb, metrics) < 0)
goto nla_put_failure;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 48636aee23c312..8b8059b7af4dd4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3566,6 +3566,7 @@ int tcp_abort(struct sock *sk, int err)
bh_unlock_sock(sk);
local_bh_enable();
+ tcp_write_queue_purge(sk);
release_sock(sk);
return 0;
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 71fc60f1b326f2..f7d944855f8ebd 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -34,6 +34,7 @@ static void tcp_write_err(struct sock *sk)
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
sk->sk_error_report(sk);
+ tcp_write_queue_purge(sk);
tcp_done(sk);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 63faeee989a99d..2a9764bd171969 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
skb_reset_network_header(skb);
skb_mac_header_rebuild(skb);
- eth_hdr(skb)->h_proto = skb->protocol;
+ if (skb->mac_len)
+ eth_hdr(skb)->h_proto = skb->protocol;
err = 0;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 05017e2c849c12..fbebda67ac1bdd 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -100,8 +100,10 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
xdst->u.rt.rt_gateway = rt->rt_gateway;
xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
xdst->u.rt.rt_pmtu = rt->rt_pmtu;
+ xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
xdst->u.rt.rt_table_id = rt->rt_table_id;
INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
+ rt_add_uncached_list(&xdst->u.rt);
return 0;
}
@@ -241,7 +243,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
dst_destroy_metrics_generic(dst);
-
+ if (xdst->u.rt.rt_uncached_list)
+ rt_del_uncached_list(&xdst->u.rt);
xfrm_dst_destroy(xdst);
}
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index fbf08ce3f5ab75..a9f7eca0b6a3fd 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
- struct in6_addr *daddr;
+ struct in6_addr *daddr, old_daddr;
+ __be32 fl6_flowlabel = 0;
+ __be32 old_fl6_flowlabel;
+ __be16 old_dport;
int addr_type;
int err;
- __be32 fl6_flowlabel = 0;
if (usin->sin6_family == AF_INET) {
if (__ipv6_only_sock(sk))
@@ -238,9 +240,13 @@ ipv4_connected:
}
}
+ /* save the current peer information before updating it */
+ old_daddr = sk->sk_v6_daddr;
+ old_fl6_flowlabel = np->flow_label;
+ old_dport = inet->inet_dport;
+
sk->sk_v6_daddr = *daddr;
np->flow_label = fl6_flowlabel;
-
inet->inet_dport = usin->sin6_port;
/*
@@ -250,11 +256,12 @@ ipv4_connected:
err = ip6_datagram_dst_update(sk, true);
if (err) {
- /* Reset daddr and dport so that udp_v6_early_demux()
- * fails to find this socket
+ /* Restore the socket peer info, to keep it consistent with
+ * the old socket state
*/
- memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
- inet->inet_dport = 0;
+ sk->sk_v6_daddr = old_daddr;
+ np->flow_label = old_fl6_flowlabel;
+ inet->inet_dport = old_dport;
goto out;
}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 3c353125546d87..1bbd0930063eec 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -126,7 +126,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
struct ip6_tnl *t, *cand = NULL;
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
int dev_type = (gre_proto == htons(ETH_P_TEB) ||
- gre_proto == htons(ETH_P_ERSPAN)) ?
+ gre_proto == htons(ETH_P_ERSPAN) ||
+ gre_proto == htons(ETH_P_ERSPAN2)) ?
ARPHRD_ETHER : ARPHRD_IP6GRE;
int score, cand_score = 4;
@@ -902,6 +903,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
truncate = true;
}
+ if (skb_cow_head(skb, dev->needed_headroom))
+ goto tx_err;
+
t->parms.o_flags &= ~TUNNEL_KEY;
IPCB(skb)->flags = 0;
@@ -944,6 +948,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
md->u.md2.dir,
get_hwid(&md->u.md2),
truncate, false);
+ } else {
+ goto tx_err;
}
} else {
switch (skb->protocol) {
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index f61a5b613b52b0..ba5e04c6ae17df 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1554,7 +1554,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
*(opt++) = (rd_len >> 3);
opt += 6;
- memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8);
+ skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
+ rd_len - 8);
}
void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9dcfadddd80055..b0d5c64e19780c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -128,7 +128,7 @@ struct uncached_list {
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
-static void rt6_uncached_list_add(struct rt6_info *rt)
+void rt6_uncached_list_add(struct rt6_info *rt)
{
struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt)
spin_unlock_bh(&ul->lock);
}
-static void rt6_uncached_list_del(struct rt6_info *rt)
+void rt6_uncached_list_del(struct rt6_info *rt)
{
if (!list_empty(&rt->rt6i_uncached)) {
struct uncached_list *ul = rt->rt6i_uncached_list;
@@ -1509,7 +1509,30 @@ static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
}
}
-static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
+static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
+ struct rt6_info *rt, int mtu)
+{
+ /* If the new MTU is lower than the route PMTU, this new MTU will be the
+ * lowest MTU in the path: always allow updating the route PMTU to
+ * reflect PMTU decreases.
+ *
+ * If the new MTU is higher, and the route PMTU is equal to the local
+ * MTU, this means the old MTU is the lowest in the path, so allow
+ * updating it: if other nodes now have lower MTUs, PMTU discovery will
+ * handle this.
+ */
+
+ if (dst_mtu(&rt->dst) >= mtu)
+ return true;
+
+ if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
+ return true;
+
+ return false;
+}
+
+static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
+ struct rt6_info *rt, int mtu)
{
struct rt6_exception_bucket *bucket;
struct rt6_exception *rt6_ex;
@@ -1518,20 +1541,22 @@ static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
lockdep_is_held(&rt6_exception_lock));
- if (bucket) {
- for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
- hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
- struct rt6_info *entry = rt6_ex->rt6i;
- /* For RTF_CACHE with rt6i_pmtu == 0
- * (i.e. a redirected route),
- * the metrics of its rt->dst.from has already
- * been updated.
- */
- if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu)
- entry->rt6i_pmtu = mtu;
- }
- bucket++;
+ if (!bucket)
+ return;
+
+ for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
+ hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
+ struct rt6_info *entry = rt6_ex->rt6i;
+
+ /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
+ * route), the metrics of its rt->dst.from have already
+ * been updated.
+ */
+ if (entry->rt6i_pmtu &&
+ rt6_mtu_change_route_allowed(idev, entry, mtu))
+ entry->rt6i_pmtu = mtu;
}
+ bucket++;
}
}
@@ -3809,25 +3834,13 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
Since RFC 1981 doesn't include administrative MTU increase
update PMTU increase is a MUST. (i.e. jumbo frame)
*/
- /*
- If new MTU is less than route PMTU, this new MTU will be the
- lowest MTU in the path, update the route PMTU to reflect PMTU
- decreases; if new MTU is greater than route PMTU, and the
- old MTU is the lowest MTU in the path, update the route PMTU
- to reflect the increase. In this case if the other nodes' MTU
- also have the lowest MTU, TOO BIG MESSAGE will be lead to
- PMTU discovery.
- */
if (rt->dst.dev == arg->dev &&
- dst_metric_raw(&rt->dst, RTAX_MTU) &&
!dst_metric_locked(&rt->dst, RTAX_MTU)) {
spin_lock_bh(&rt6_exception_lock);
- if (dst_mtu(&rt->dst) >= arg->mtu ||
- (dst_mtu(&rt->dst) < arg->mtu &&
- dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
+ if (dst_metric_raw(&rt->dst, RTAX_MTU) &&
+ rt6_mtu_change_route_allowed(idev, rt, arg->mtu))
dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
- }
- rt6_exceptions_update_pmtu(rt, arg->mtu);
+ rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
spin_unlock_bh(&rt6_exception_lock);
}
return 0;
@@ -4099,6 +4112,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
r_cfg.fc_encap_type = nla_get_u16(nla);
}
+ r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
rt = ip6_route_info_create(&r_cfg, extack);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
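The comment block introduced with rt6_mtu_change_route_allowed() earlier in this file's hunks describes when a device MTU change may be propagated into an existing route PMTU. A minimal userspace sketch of that decision follows; the function name, parameter names and sample values are illustrative only, not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* route_pmtu: PMTU currently cached on the route (dst_mtu() in the patch)
 * dev_mtu:    the interface MTU before the change (idev->cnf.mtu6)
 * new_mtu:    the MTU the interface is being changed to
 */
static bool mtu_change_allowed(unsigned int route_pmtu, unsigned int dev_mtu,
                               unsigned int new_mtu)
{
        /* A lower (or equal) new MTU is always the lowest MTU in the path,
         * so the cached PMTU may follow it down.
         */
        if (route_pmtu >= new_mtu)
                return true;

        /* Raising is only propagated when the cached PMTU equals the local
         * MTU, i.e. this link was the bottleneck; otherwise another hop set
         * the limit and PMTU discovery has to rediscover it.
         */
        if (route_pmtu == dev_mtu)
                return true;

        return false;
}

int main(void)
{
        printf("%d\n", mtu_change_allowed(1400, 1500, 1280)); /* 1: decrease */
        printf("%d\n", mtu_change_allowed(1500, 1500, 9000)); /* 1: local link was the bottleneck */
        printf("%d\n", mtu_change_allowed(1400, 1500, 9000)); /* 0: a remote hop limits the path */
        return 0;
}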
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index bd6cc688bd199a..7a78dcfda68a17 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -93,7 +93,8 @@ static void set_tun_src(struct net *net, struct net_device *dev,
/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
{
- struct net *net = dev_net(skb_dst(skb)->dev);
+ struct dst_entry *dst = skb_dst(skb);
+ struct net *net = dev_net(dst->dev);
struct ipv6hdr *hdr, *inner_hdr;
struct ipv6_sr_hdr *isrh;
int hdrlen, tot_len, err;
@@ -134,7 +135,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
isrh->nexthdr = proto;
hdr->daddr = isrh->segments[isrh->first_segment];
- set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr);
+ set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
#ifdef CONFIG_IPV6_SEG6_HMAC
if (sr_has_hmac(isrh)) {
@@ -418,7 +419,7 @@ static int seg6_build_state(struct nlattr *nla,
slwt = seg6_lwt_lwtunnel(newts);
- err = dst_cache_init(&slwt->cache, GFP_KERNEL);
+ err = dst_cache_init(&slwt->cache, GFP_ATOMIC);
if (err) {
kfree(newts);
return err;
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index bb935a3b7feada..de1b0b8c53b0ba 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
skb_reset_network_header(skb);
skb_mac_header_rebuild(skb);
- eth_hdr(skb)->h_proto = skb->protocol;
+ if (skb->mac_len)
+ eth_hdr(skb)->h_proto = skb->protocol;
err = 0;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 09fb44ee3b45d7..416fe67271a920 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -113,6 +113,9 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
xdst->u.rt6.rt6i_src = rt->rt6i_src;
+ INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached);
+ rt6_uncached_list_add(&xdst->u.rt6);
+ atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache);
return 0;
}
@@ -244,6 +247,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt6.rt6i_idev))
in6_dev_put(xdst->u.rt6.rt6i_idev);
dst_destroy_metrics_generic(dst);
+ if (xdst->u.rt6.rt6i_uncached_list)
+ rt6_uncached_list_del(&xdst->u.rt6);
xfrm_dst_destroy(xdst);
}
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 1e8cc7bcbca3a4..9e2643ab4ccbfc 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -2433,9 +2433,11 @@ static int afiucv_iucv_init(void)
af_iucv_dev->driver = &af_iucv_driver;
err = device_register(af_iucv_dev);
if (err)
- goto out_driver;
+ goto out_iucv_dev;
return 0;
+out_iucv_dev:
+ put_device(af_iucv_dev);
out_driver:
driver_unregister(&af_iucv_driver);
out_iucv:
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index f297d53a11aa0b..34355fd19f27dc 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1381,24 +1381,32 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
.parse_msg = kcm_parse_func_strparser,
.read_sock_done = kcm_read_sock_done,
};
- int err;
+ int err = 0;
csk = csock->sk;
if (!csk)
return -EINVAL;
+ lock_sock(csk);
+
/* Only allow TCP sockets to be attached for now */
if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
- csk->sk_protocol != IPPROTO_TCP)
- return -EOPNOTSUPP;
+ csk->sk_protocol != IPPROTO_TCP) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
/* Don't allow listeners or closed sockets */
- if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
- return -EOPNOTSUPP;
+ if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
- if (!psock)
- return -ENOMEM;
+ if (!psock) {
+ err = -ENOMEM;
+ goto out;
+ }
psock->mux = mux;
psock->sk = csk;
@@ -1407,7 +1415,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
err = strp_init(&psock->strp, csk, &cb);
if (err) {
kmem_cache_free(kcm_psockp, psock);
- return err;
+ goto out;
}
write_lock_bh(&csk->sk_callback_lock);
@@ -1419,7 +1427,8 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
write_unlock_bh(&csk->sk_callback_lock);
strp_done(&psock->strp);
kmem_cache_free(kcm_psockp, psock);
- return -EALREADY;
+ err = -EALREADY;
+ goto out;
}
psock->save_data_ready = csk->sk_data_ready;
@@ -1455,7 +1464,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
/* Schedule RX work in case there are already bytes queued */
strp_check_rcv(&psock->strp);
- return 0;
+out:
+ release_sock(csk);
+
+ return err;
}
static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
@@ -1507,6 +1519,7 @@ static void kcm_unattach(struct kcm_psock *psock)
if (WARN_ON(psock->rx_kcm)) {
write_unlock_bh(&csk->sk_callback_lock);
+ release_sock(csk);
return;
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 83421c6f0bef1c..14b67dfacc4b48 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -111,6 +111,13 @@ struct l2tp_net {
spinlock_t l2tp_session_hlist_lock;
};
+#if IS_ENABLED(CONFIG_IPV6)
+static bool l2tp_sk_is_v6(struct sock *sk)
+{
+ return sk->sk_family == PF_INET6 &&
+ !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
+}
+#endif
static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
{
@@ -1049,7 +1056,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
/* Queue the packet to IP for output */
skb->ignore_df = 1;
#if IS_ENABLED(CONFIG_IPV6)
- if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
+ if (l2tp_sk_is_v6(tunnel->sock))
error = inet6_csk_xmit(tunnel->sock, skb, NULL);
else
#endif
@@ -1112,6 +1119,15 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
goto out_unlock;
}
+ /* The user-space may change the connection status for the user-space
+ * provided socket at run time: we must check it under the socket lock
+ */
+ if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
+ kfree_skb(skb);
+ ret = NET_XMIT_DROP;
+ goto out_unlock;
+ }
+
/* Get routing info from the tunnel socket */
skb_dst_drop(skb);
skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
@@ -1131,7 +1147,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
+ if (l2tp_sk_is_v6(sk))
udp6_set_csum(udp_get_no_check6_tx(sk),
skb, &inet6_sk(sk)->saddr,
&sk->sk_v6_daddr, udp_len);
@@ -1457,9 +1473,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
encap = cfg->encap;
/* Quick sanity checks */
+ err = -EPROTONOSUPPORT;
+ if (sk->sk_type != SOCK_DGRAM) {
+ pr_debug("tunl %hu: fd %d wrong socket type\n",
+ tunnel_id, fd);
+ goto err;
+ }
switch (encap) {
case L2TP_ENCAPTYPE_UDP:
- err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_UDP) {
pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
@@ -1467,7 +1488,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
}
break;
case L2TP_ENCAPTYPE_IP:
- err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_L2TP) {
pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
@@ -1507,24 +1527,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
if (cfg != NULL)
tunnel->debug = cfg->debug;
-#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == PF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(sk);
-
- if (ipv6_addr_v4mapped(&np->saddr) &&
- ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
- struct inet_sock *inet = inet_sk(sk);
-
- tunnel->v4mapped = true;
- inet->inet_saddr = np->saddr.s6_addr32[3];
- inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
- inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
- } else {
- tunnel->v4mapped = false;
- }
- }
-#endif
-
/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
tunnel->encap = encap;
if (encap == L2TP_ENCAPTYPE_UDP) {
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a1aa9550f04e2c..2718d0b284d040 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -188,9 +188,6 @@ struct l2tp_tunnel {
struct sock *sock; /* Parent socket */
int fd; /* Parent fd, if tunnel socket
* was created by userspace */
-#if IS_ENABLED(CONFIG_IPV6)
- bool v4mapped;
-#endif
struct work_struct del_work;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 1f466d12a6bcd6..94c7ee9df33b63 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -212,6 +212,7 @@ static const char *hw_flag_names[] = {
FLAG(REPORTS_LOW_ACK),
FLAG(SUPPORTS_TX_FRAG),
FLAG(SUPPORTS_TDLS_BUFFER_STA),
+ FLAG(DOESNT_SUPPORT_QOS_NDP),
#undef FLAG
};
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 39b660b9a90894..5f303abac5ad6e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -896,7 +896,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_hdr_3addr *nullfunc;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true);
+ skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
+ !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP));
if (!skb)
return;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 558593e6a0a3b5..c4acc7340eb101 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5423,6 +5423,7 @@ err:
static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
{
cancel_delayed_work_sync(&flowtable->data.gc_work);
+ kfree(flowtable->ops);
kfree(flowtable->name);
flowtable->data.type->free(&flowtable->data);
rhashtable_destroy(&flowtable->data.rhashtable);
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 3f1624ee056f96..d40591fe1b2f64 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -674,7 +674,7 @@ static const struct nft_set_ops *
nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc,
u32 flags)
{
- if (desc->size) {
+ if (desc->size && !(flags & NFT_SET_TIMEOUT)) {
switch (desc->klen) {
case 4:
return &nft_hash_fast_ops;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index fa1655aff8d3fe..4aa01c90e9d1ed 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -423,6 +423,36 @@ textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
return buf;
}
+/**
+ * xt_check_proc_name - check that name is suitable for /proc file creation
+ *
+ * @name: file name candidate
+ * @size: length of buffer
+ *
+ * some x_tables modules wish to create a file in /proc.
+ * This function makes sure that the name is suitable for this
+ * purpose, it checks that name is NUL terminated and isn't a 'special'
+ * name, like "..".
+ *
+ * returns negative number on error or 0 if name is useable.
+ */
+int xt_check_proc_name(const char *name, unsigned int size)
+{
+ if (name[0] == '\0')
+ return -EINVAL;
+
+ if (strnlen(name, size) == size)
+ return -ENAMETOOLONG;
+
+ if (strcmp(name, ".") == 0 ||
+ strcmp(name, "..") == 0 ||
+ strchr(name, '/'))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(xt_check_proc_name);
+
int xt_check_match(struct xt_mtchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 66f5aca62a087e..3360f13dc208b6 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -917,8 +917,9 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
struct hashlimit_cfg3 cfg = {};
int ret;
- if (info->name[sizeof(info->name) - 1] != '\0')
- return -EINVAL;
+ ret = xt_check_proc_name(info->name, sizeof(info->name));
+ if (ret)
+ return ret;
ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
@@ -935,8 +936,9 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
struct hashlimit_cfg3 cfg = {};
int ret;
- if (info->name[sizeof(info->name) - 1] != '\0')
- return -EINVAL;
+ ret = xt_check_proc_name(info->name, sizeof(info->name));
+ if (ret)
+ return ret;
ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
@@ -950,9 +952,11 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
static int hashlimit_mt_check(const struct xt_mtchk_param *par)
{
struct xt_hashlimit_mtinfo3 *info = par->matchinfo;
+ int ret;
- if (info->name[sizeof(info->name) - 1] != '\0')
- return -EINVAL;
+ ret = xt_check_proc_name(info->name, sizeof(info->name));
+ if (ret)
+ return ret;
return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg,
info->name, 3);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 6d232d18faff72..81ee1d6543b269 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -361,9 +361,9 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
return -EINVAL;
}
- if (info->name[0] == '\0' ||
- strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
- return -EINVAL;
+ ret = xt_check_proc_name(info->name, sizeof(info->name));
+ if (ret)
+ return ret;
if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 6f02499ef00725..b9ce82c9440f1c 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1106,7 +1106,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
if (!err)
delivered = true;
else if (err != -ESRCH)
- goto error;
+ return err;
return delivered ? 0 : -ESRCH;
error:
kfree_skb(skb);
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 04b94281a30b2b..b891a91577f803 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -242,14 +242,20 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
band->type = nla_get_u32(attr[OVS_BAND_ATTR_TYPE]);
band->rate = nla_get_u32(attr[OVS_BAND_ATTR_RATE]);
+ if (band->rate == 0) {
+ err = -EINVAL;
+ goto exit_free_meter;
+ }
+
band->burst_size = nla_get_u32(attr[OVS_BAND_ATTR_BURST]);
/* Figure out max delta_t that is enough to fill any bucket.
* Keep max_delta_t size to the bucket units:
* pkts => 1/1000 packets, kilobits => bits.
+ *
+ * Start with a full bucket.
*/
- band_max_delta_t = (band->burst_size + band->rate) * 1000;
- /* Start with a full bucket. */
- band->bucket = band_max_delta_t;
+ band->bucket = (band->burst_size + band->rate) * 1000;
+ band_max_delta_t = band->bucket / band->rate;
if (band_max_delta_t > meter->max_delta_t)
meter->max_delta_t = band_max_delta_t;
band++;
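The reworked comment in dp_meter_create() above notes that the bucket is kept in 1/1000ths of the configured unit and starts full; with the rate now rejected when zero, max_delta_t becomes the time needed to refill that bucket rather than the bucket size itself. A small worked example of the corrected arithmetic, with made-up values for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t rate = 1000;        /* e.g. kilobits per second, i.e. bits per ms */
        uint32_t burst_size = 500;   /* same unit as rate */

        /* Bucket in 1/1000ths of the unit, starting full. */
        uint64_t bucket = ((uint64_t)burst_size + rate) * 1000;

        /* Milliseconds needed to refill an empty bucket at `rate`:
         * this is what feeds max_delta_t after the fix.
         */
        uint64_t max_delta_t = bucket / rate;

        printf("bucket=%llu, max_delta_t=%llu ms\n",
               (unsigned long long)bucket, (unsigned long long)max_delta_t);
        return 0;
}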
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index b3f2c15affa7b2..9d2cabf1dc7ea5 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -352,7 +352,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
return res;
out:
if (res == ACT_P_CREATED)
- tcf_idr_cleanup(*act, est);
+ tcf_idr_release(*act, bind);
return ret;
}
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index b7ba9b06b147ed..2a5c8fd860cf1e 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -350,7 +350,7 @@ static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
{
struct sctphdr *sctph;
- if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP)
+ if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
return 1;
sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
@@ -626,7 +626,8 @@ static void tcf_csum_cleanup(struct tc_action *a)
struct tcf_csum_params *params;
params = rcu_dereference_protected(p->params, 1);
- kfree_rcu(params, rcu);
+ if (params)
+ kfree_rcu(params, rcu);
}
static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 06e380ae09283f..7e06b9b626134d 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -80,9 +80,12 @@ static void ipt_destroy_target(struct xt_entry_target *t)
static void tcf_ipt_release(struct tc_action *a)
{
struct tcf_ipt *ipt = to_ipt(a);
- ipt_destroy_target(ipt->tcfi_t);
+
+ if (ipt->tcfi_t) {
+ ipt_destroy_target(ipt->tcfi_t);
+ kfree(ipt->tcfi_t);
+ }
kfree(ipt->tcfi_tname);
- kfree(ipt->tcfi_t);
}
static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
@@ -187,7 +190,7 @@ err2:
kfree(tname);
err1:
if (ret == ACT_P_CREATED)
- tcf_idr_cleanup(*a, est);
+ tcf_idr_release(*a, bind);
return err;
}
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 349beaffb29e4a..fef08835f26d9c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -176,7 +176,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
p = to_pedit(*a);
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL) {
- tcf_idr_cleanup(*a, est);
+ tcf_idr_release(*a, bind);
kfree(keys_ex);
return -ENOMEM;
}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 95d3c9097b2516..faebf82b99f199 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -194,7 +194,7 @@ failure:
qdisc_put_rtab(P_tab);
qdisc_put_rtab(R_tab);
if (ret == ACT_P_CREATED)
- tcf_idr_cleanup(*a, est);
+ tcf_idr_release(*a, bind);
return err;
}
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 1ba0df23875624..74c5d7e6a0fac0 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -103,7 +103,8 @@ static void tcf_sample_cleanup(struct tc_action *a)
psample_group = rtnl_dereference(s->psample_group);
RCU_INIT_POINTER(s->psample_group, NULL);
- psample_group_put(psample_group);
+ if (psample_group)
+ psample_group_put(psample_group);
}
static bool tcf_sample_dev_ok_push(struct net_device *dev)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 425eac11f6da08..b1f38063ada09e 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -121,7 +121,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
d = to_defact(*a);
ret = alloc_defdata(d, defdata);
if (ret < 0) {
- tcf_idr_cleanup(*a, est);
+ tcf_idr_release(*a, bind);
return ret;
}
d->tcf_action = parm->action;
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index fa975262dbacf1..7b0700f52b505c 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -152,7 +152,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
ASSERT_RTNL();
p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
if (unlikely(!p)) {
- if (ovr)
+ if (ret == ACT_P_CREATED)
tcf_idr_release(*a, bind);
return -ENOMEM;
}
@@ -190,7 +190,8 @@ static void tcf_skbmod_cleanup(struct tc_action *a)
struct tcf_skbmod_params *p;
p = rcu_dereference_protected(d->skbmod_p, 1);
- kfree_rcu(p, rcu);
+ if (p)
+ kfree_rcu(p, rcu);
}
static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 0e23aac09ad63a..1281ca463727a3 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -153,6 +153,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
break;
default:
+ ret = -EINVAL;
goto err_out;
}
@@ -207,11 +208,12 @@ static void tunnel_key_release(struct tc_action *a)
struct tcf_tunnel_key_params *params;
params = rcu_dereference_protected(t->params, 1);
+ if (params) {
+ if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+ dst_release(&params->tcft_enc_metadata->dst);
- if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
- dst_release(&params->tcft_enc_metadata->dst);
-
- kfree_rcu(params, rcu);
+ kfree_rcu(params, rcu);
+ }
}
static int tunnel_key_dump_addresses(struct sk_buff *skb,
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index e1a1b3f3983a61..c49cb61adedffc 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -195,7 +195,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
ASSERT_RTNL();
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
- if (ovr)
+ if (ret == ACT_P_CREATED)
tcf_idr_release(*a, bind);
return -ENOMEM;
}
@@ -225,7 +225,8 @@ static void tcf_vlan_cleanup(struct tc_action *a)
struct tcf_vlan_params *p;
p = rcu_dereference_protected(v->vlan_p, 1);
- kfree_rcu(p, rcu);
+ if (p)
+ kfree_rcu(p, rcu);
}
static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 190570f21b208d..7e3fbe9cc936be 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -106,6 +106,14 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
__skb_queue_tail(&q->skb_bad_txq, skb);
+ if (qdisc_is_percpu_stats(q)) {
+ qdisc_qstats_cpu_backlog_inc(q, skb);
+ qdisc_qstats_cpu_qlen_inc(q);
+ } else {
+ qdisc_qstats_backlog_inc(q, skb);
+ q->q.qlen++;
+ }
+
if (lock)
spin_unlock(lock);
}
@@ -196,14 +204,6 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
break;
if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
qdisc_enqueue_skb_bad_txq(q, nskb);
-
- if (qdisc_is_percpu_stats(q)) {
- qdisc_qstats_cpu_backlog_inc(q, nskb);
- qdisc_qstats_cpu_qlen_inc(q);
- } else {
- qdisc_qstats_backlog_inc(q, nskb);
- q->q.qlen++;
- }
break;
}
skb->next = nskb;
@@ -628,6 +628,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
int band = prio2band[skb->priority & TC_PRIO_MAX];
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
struct skb_array *q = band2list(priv, band);
+ unsigned int pkt_len = qdisc_pkt_len(skb);
int err;
err = skb_array_produce(q, skb);
@@ -636,7 +637,10 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
return qdisc_drop_cpu(skb, qdisc, to_free);
qdisc_qstats_cpu_qlen_inc(qdisc);
- qdisc_qstats_cpu_backlog_inc(qdisc, skb);
+ /* Note: skb can not be used after skb_array_produce(),
+ * so we better not use qdisc_qstats_cpu_backlog_inc()
+ */
+ this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 7c179addebcd29..7d6801fc5340ef 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -509,7 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
if (unlikely(sch->q.qlen >= sch->limit))
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_all(skb, sch, to_free);
qdisc_qstats_backlog_inc(sch, skb);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 0247cc432e0293..b381d78548ac73 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -106,6 +106,7 @@ int sctp_rcv(struct sk_buff *skb)
int family;
struct sctp_af *af;
struct net *net = dev_net(skb->dev);
+ bool is_gso = skb_is_gso(skb) && skb_is_gso_sctp(skb);
if (skb->pkt_type != PACKET_HOST)
goto discard_it;
@@ -123,8 +124,7 @@ int sctp_rcv(struct sk_buff *skb)
* it's better to just linearize it otherwise crc computing
* takes longer.
*/
- if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
- skb_linearize(skb)) ||
+ if ((!is_gso && skb_linearize(skb)) ||
!pskb_may_pull(skb, sizeof(struct sctphdr)))
goto discard_it;
@@ -135,7 +135,7 @@ int sctp_rcv(struct sk_buff *skb)
if (skb_csum_unnecessary(skb))
__skb_decr_checksum_unnecessary(skb);
else if (!sctp_checksum_disable &&
- !(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
+ !is_gso &&
sctp_rcv_checksum(net, skb) < 0)
goto discard_it;
skb->csum_valid = 1;
@@ -1218,7 +1218,7 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
* issue as packets hitting this are mostly INIT or INIT-ACK and
* those cannot be on GSO-style anyway.
*/
- if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP)
+ if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
return NULL;
ch = (struct sctp_chunkhdr *)skb->data;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 48392552ee7c1e..23ebc5318edc47 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -170,7 +170,7 @@ next_chunk:
chunk = list_entry(entry, struct sctp_chunk, list);
- if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
+ if (skb_is_gso(chunk->skb) && skb_is_gso_sctp(chunk->skb)) {
/* GSO-marked skbs but without frags, handle
* them normally
*/
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 35bc7106d1827a..123e9f2dc22652 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -45,7 +45,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct sctphdr *sh;
- if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
+ if (!skb_is_gso_sctp(skb))
goto out;
sh = sctp_hdr(skb);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 8cc97834d4f647..1e0d780855c31f 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -978,10 +978,6 @@ out:
lsmc->clcsock = NULL;
}
release_sock(lsk);
- /* no more listening, wake up smc_close_wait_listen_clcsock and
- * accept
- */
- lsk->sk_state_change(lsk);
sock_put(&lsmc->sk); /* sock_hold in smc_listen */
}
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index e339c0186dcfc2..fa41d988174146 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -30,27 +30,6 @@ static void smc_close_cleanup_listen(struct sock *parent)
smc_close_non_accepted(sk);
}
-static void smc_close_wait_listen_clcsock(struct smc_sock *smc)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct sock *sk = &smc->sk;
- signed long timeout;
-
- timeout = SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME;
- add_wait_queue(sk_sleep(sk), &wait);
- do {
- release_sock(sk);
- if (smc->clcsock)
- timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE,
- timeout);
- sched_annotate_sleep();
- lock_sock(sk);
- if (!smc->clcsock)
- break;
- } while (timeout);
- remove_wait_queue(sk_sleep(sk), &wait);
-}
-
/* wait for sndbuf data being transmitted */
static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
{
@@ -204,9 +183,11 @@ again:
rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
/* wake up kernel_accept of smc_tcp_listen_worker */
smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
- smc_close_wait_listen_clcsock(smc);
}
smc_close_cleanup_listen(sk);
+ release_sock(sk);
+ flush_work(&smc->tcp_listen_work);
+ lock_sock(sk);
break;
case SMC_ACTIVE:
smc_close_stream_wait(smc, timeout);
diff --git a/net/socket.c b/net/socket.c
index a93c99b518cab6..08847c3b8c39f7 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2587,6 +2587,11 @@ void sock_unregister(int family)
}
EXPORT_SYMBOL(sock_unregister);
+bool sock_is_registered(int family)
+{
+ return family < NPROTO && rcu_access_pointer(net_families[family]);
+}
+
static int __init sock_init(void)
{
int err;
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index ccfdc7115a83f7..a00ec715aa4681 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
struct crypto_comp *tfm;
/* This can be any valid CPU ID so we don't need locking. */
- tfm = __this_cpu_read(*pos->tfms);
+ tfm = this_cpu_read(*pos->tfms);
if (!strcmp(crypto_comp_name(tfm), alg_name)) {
pos->users++;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7a23078132cf52..625b3fca570419 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1458,10 +1458,13 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
static int xfrm_get_tos(const struct flowi *fl, int family)
{
const struct xfrm_policy_afinfo *afinfo;
- int tos = 0;
+ int tos;
afinfo = xfrm_policy_get_afinfo(family);
- tos = afinfo ? afinfo->get_tos(fl) : 0;
+ if (!afinfo)
+ return 0;
+
+ tos = afinfo->get_tos(fl);
rcu_read_unlock();
@@ -1891,7 +1894,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
spin_unlock(&pq->hold_queue.lock);
dst_hold(xfrm_dst_path(dst));
- dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0);
+ dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
if (IS_ERR(dst))
goto purge_queue;
@@ -2729,14 +2732,14 @@ static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
while (dst->xfrm) {
const struct xfrm_state *xfrm = dst->xfrm;
+ dst = xfrm_dst_child(dst);
+
if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
continue;
if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
daddr = xfrm->coaddr;
else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
daddr = &xfrm->id.daddr;
-
- dst = xfrm_dst_child(dst);
}
return daddr;
}
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 1d38c6acf8afbb..9e3a5e85f8285e 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -660,7 +660,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
} else {
XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
- xo->seq.low = oseq = oseq + 1;
+ xo->seq.low = oseq + 1;
xo->seq.hi = oseq_hi;
oseq += skb_shinfo(skb)->gso_segs;
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 54e21f19d722c4..f9d2f2233f0953 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2056,6 +2056,11 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
struct xfrm_mgr *km;
struct xfrm_policy *pol = NULL;
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+#endif
+
if (!optval && !optlen) {
xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7f52b8eb177db4..080035f056d992 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -121,22 +121,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
struct xfrm_replay_state_esn *rs;
- if (p->flags & XFRM_STATE_ESN) {
- if (!rt)
- return -EINVAL;
+ if (!rt)
+ return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
- rs = nla_data(rt);
+ rs = nla_data(rt);
- if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
- return -EINVAL;
-
- if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
- nla_len(rt) != sizeof(*rs))
- return -EINVAL;
- }
+ if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+ return -EINVAL;
- if (!rt)
- return 0;
+ if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
+ nla_len(rt) != sizeof(*rs))
+ return -EINVAL;
/* As only ESP and AH support ESN feature. */
if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 0b482c0070e04f..465995281dcd36 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -55,6 +55,10 @@
#include "main.h"
+#ifndef BPF_FS_MAGIC
+#define BPF_FS_MAGIC 0xcafe4a11
+#endif
+
void p_err(const char *fmt, ...)
{
va_list ap;