This is a note to let you know that I've just added the patch titled
ppp: avoid loop in xmit recursion detection code
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
ppp-avoid-loop-in-xmit-recursion-detection-code.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Wed Mar 28 18:37:51 CEST 2018
From: Guillaume Nault <g.nault(a)alphalink.fr>
Date: Tue, 20 Mar 2018 16:49:26 +0100
Subject: ppp: avoid loop in xmit recursion detection code
From: Guillaume Nault <g.nault(a)alphalink.fr>
[ Upstream commit 6d066734e9f09cdea4a3b9cb76136db3f29cfb02 ]
We already detect situations where a PPP channel sends packets back to
its upper PPP device. While this is enough to avoid deadlocking on xmit
locks, this doesn't prevent packets from looping between the channel
and the unit.
The problem is that ppp_start_xmit() enqueues packets in ppp->file.xq
before checking for xmit recursion. Therefore, __ppp_xmit_process()
might dequeue a packet from ppp->file.xq and send it on the channel
which, in turn, loops it back on the unit. Then ppp_start_xmit()
queues the packet back to ppp->file.xq and __ppp_xmit_process() picks
it up and sends it again through the channel. Therefore, the packet
will loop between __ppp_xmit_process() and ppp_start_xmit() until some
other part of the xmit path drops it.
For L2TP, we rapidly fill the skb's headroom and pppol2tp_xmit() drops
the packet after a few iterations. But PPTP reallocates the headroom
if necessary, letting the loop run and exhaust the machine resources
(as reported in https://bugzilla.kernel.org/show_bug.cgi?id=199109).
Fix this by letting __ppp_xmit_process() enqueue the skb to
ppp->file.xq, so that we can check for recursion before adding it to
the queue. Now ppp_xmit_process() can drop the packet when recursion is
detected.
__ppp_channel_push() is a bit special. It calls __ppp_xmit_process()
without having any actual packet to send. This is used by
ppp_output_wakeup() to re-enable transmission on the parent unit (for
implementations like ppp_async.c, where the .start_xmit() function
might not consume the skb, leaving it in ppp->xmit_pending and
disabling transmission).
Therefore, __ppp_xmit_process() needs to handle the case where skb is
NULL, dequeuing as many packets as possible from ppp->file.xq.
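For reference, a condensed sketch of the patched ppp_xmit_process() after this
change (paraphrased from the diff below, not a standalone unit):

static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
	local_bh_disable();

	if (*this_cpu_ptr(ppp->xmit_recursion)) {
		/* Recursion detected: drop the packet here instead of leaving
		 * it queued, which is what previously allowed the packet to
		 * loop between the channel and the unit.
		 */
		local_bh_enable();
		kfree_skb(skb);
		if (net_ratelimit())
			netdev_err(ppp->dev, "recursion detected\n");
		return;
	}

	(*this_cpu_ptr(ppp->xmit_recursion))++;
	__ppp_xmit_process(ppp, skb);	/* queues skb (if any), then drains ppp->file.xq */
	(*this_cpu_ptr(ppp->xmit_recursion))--;

	local_bh_enable();
}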
Reported-by: xu heng <xuheng333(a)zoho.com>
Fixes: 55454a565836 ("ppp: avoid dealock on recursive xmit")
Signed-off-by: Guillaume Nault <g.nault(a)alphalink.fr>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/net/ppp/ppp_generic.c | 26 ++++++++++++++------------
1 file changed, 14 insertions(+), 12 deletions(-)
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -257,7 +257,7 @@ struct ppp_net {
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
-static void ppp_xmit_process(struct ppp *ppp);
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
@@ -513,13 +513,12 @@ static ssize_t ppp_write(struct file *fi
goto out;
}
- skb_queue_tail(&pf->xq, skb);
-
switch (pf->kind) {
case INTERFACE:
- ppp_xmit_process(PF_TO_PPP(pf));
+ ppp_xmit_process(PF_TO_PPP(pf), skb);
break;
case CHANNEL:
+ skb_queue_tail(&pf->xq, skb);
ppp_channel_push(PF_TO_CHANNEL(pf));
break;
}
@@ -1267,8 +1266,8 @@ ppp_start_xmit(struct sk_buff *skb, stru
put_unaligned_be16(proto, pp);
skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
- skb_queue_tail(&ppp->file.xq, skb);
- ppp_xmit_process(ppp);
+ ppp_xmit_process(ppp, skb);
+
return NETDEV_TX_OK;
outf:
@@ -1420,13 +1419,14 @@ static void ppp_setup(struct net_device
*/
/* Called to do any work queued up on the transmit side that can now be done */
-static void __ppp_xmit_process(struct ppp *ppp)
+static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
- struct sk_buff *skb;
-
ppp_xmit_lock(ppp);
if (!ppp->closing) {
ppp_push(ppp);
+
+ if (skb)
+ skb_queue_tail(&ppp->file.xq, skb);
while (!ppp->xmit_pending &&
(skb = skb_dequeue(&ppp->file.xq)))
ppp_send_frame(ppp, skb);
@@ -1440,7 +1440,7 @@ static void __ppp_xmit_process(struct pp
ppp_xmit_unlock(ppp);
}
-static void ppp_xmit_process(struct ppp *ppp)
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
local_bh_disable();
@@ -1448,7 +1448,7 @@ static void ppp_xmit_process(struct ppp
goto err;
(*this_cpu_ptr(ppp->xmit_recursion))++;
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, skb);
(*this_cpu_ptr(ppp->xmit_recursion))--;
local_bh_enable();
@@ -1458,6 +1458,8 @@ static void ppp_xmit_process(struct ppp
err:
local_bh_enable();
+ kfree_skb(skb);
+
if (net_ratelimit())
netdev_err(ppp->dev, "recursion detected\n");
}
@@ -1942,7 +1944,7 @@ static void __ppp_channel_push(struct ch
if (skb_queue_empty(&pch->file.xq)) {
ppp = pch->ppp;
if (ppp)
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, NULL);
}
}
Patches currently in stable-queue which might be from g.nault(a)alphalink.fr are
queue-4.15/ppp-avoid-loop-in-xmit-recursion-detection-code.patch
queue-4.15/ipv6-old_dport-should-be-a-__be16-in-__ip6_datagram_connect.patch
queue-4.15/l2tp-do-not-accept-arbitrary-sockets.patch
This is a note to let you know that I've just added the patch titled
openvswitch: meter: fix the incorrect calculation of max delta_t
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
openvswitch-meter-fix-the-incorrect-calculation-of-max-delta_t.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Wed Mar 28 18:37:51 CEST 2018
From: zhangliping <zhangliping02(a)baidu.com>
Date: Fri, 9 Mar 2018 10:08:50 +0800
Subject: openvswitch: meter: fix the incorrect calculation of max delta_t
From: zhangliping <zhangliping02(a)baidu.com>
[ Upstream commit ddc502dfed600bff0b61d899f70d95b76223fdfc ]
Max delta_t should be the full_bucket/rate instead of the full_bucket.
Also report EINVAL if the rate is zero.
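As a worked example (hypothetical numbers, not from the patch): for a kilobit
band with rate = 100 and burst_size = 900, band->bucket = (900 + 100) * 1000 =
1,000,000 bucket units (bits). The bucket refills at 100 kbit/s, i.e. 100 bits
per millisecond, so a full bucket takes 1,000,000 / 100 = 10,000 ms to fill,
which is the new band_max_delta_t. The old code used 1,000,000 itself as the
cap, mixing bucket units with milliseconds; the new division is also why a
zero rate must now be rejected with EINVAL.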
Fixes: 96fbc13d7e77 ("openvswitch: Add meter infrastructure")
Cc: Andy Zhou <azhou(a)ovn.org>
Signed-off-by: zhangliping <zhangliping02(a)baidu.com>
Acked-by: Pravin B Shelar <pshelar(a)ovn.org>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
net/openvswitch/meter.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -242,14 +242,20 @@ static struct dp_meter *dp_meter_create(
band->type = nla_get_u32(attr[OVS_BAND_ATTR_TYPE]);
band->rate = nla_get_u32(attr[OVS_BAND_ATTR_RATE]);
+ if (band->rate == 0) {
+ err = -EINVAL;
+ goto exit_free_meter;
+ }
+
band->burst_size = nla_get_u32(attr[OVS_BAND_ATTR_BURST]);
/* Figure out max delta_t that is enough to fill any bucket.
* Keep max_delta_t size to the bucket units:
* pkts => 1/1000 packets, kilobits => bits.
+ *
+ * Start with a full bucket.
*/
- band_max_delta_t = (band->burst_size + band->rate) * 1000;
- /* Start with a full bucket. */
- band->bucket = band_max_delta_t;
+ band->bucket = (band->burst_size + band->rate) * 1000;
+ band_max_delta_t = band->bucket / band->rate;
if (band_max_delta_t > meter->max_delta_t)
meter->max_delta_t = band_max_delta_t;
band++;
Patches currently in stable-queue which might be from zhangliping02(a)baidu.com are
queue-4.15/openvswitch-meter-fix-the-incorrect-calculation-of-max-delta_t.patch
This is a note to let you know that I've just added the patch titled
netlink: avoid a double skb free in genlmsg_mcast()
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
netlink-avoid-a-double-skb-free-in-genlmsg_mcast.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Wed Mar 28 18:37:51 CEST 2018
From: Nicolas Dichtel <nicolas.dichtel(a)6wind.com>
Date: Wed, 14 Mar 2018 21:10:23 +0100
Subject: netlink: avoid a double skb free in genlmsg_mcast()
From: Nicolas Dichtel <nicolas.dichtel(a)6wind.com>
[ Upstream commit 02a2385f37a7c6594c9d89b64c4a1451276f08eb ]
nlmsg_multicast() always consumes the skb, thus the original skb must be
freed only when this function is called with a clone.
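Put differently, a minimal sketch of the ownership rule inside genlmsg_mcast()
(paraphrased, not the complete function): the original skb is handed out only
on the final nlmsg_multicast() call, so only the clone-sending error path may
still free it:

	/* every netns except the last gets a clone of skb */
	err = nlmsg_multicast(prev->genl_sock, tmp, portid, group, flags);
	if (err && err != -ESRCH)
		goto error;	/* only the clone was consumed, skb is still ours */

	/* the last netns gets the original skb itself */
	err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
	if (err && err != -ESRCH)
		return err;	/* skb consumed; jumping to the kfree_skb()
				 * below would be a double free
				 */
	return delivered ? 0 : -ESRCH;

error:
	kfree_skb(skb);
	return err;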
Fixes: cb9f7a9a5c96 ("netlink: ensure to loop over all netns in genlmsg_multicast_allns()")
Reported-by: Ben Hutchings <ben.hutchings(a)codethink.co.uk>
Signed-off-by: Nicolas Dichtel <nicolas.dichtel(a)6wind.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
net/netlink/genetlink.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1106,7 +1106,7 @@ static int genlmsg_mcast(struct sk_buff
if (!err)
delivered = true;
else if (err != -ESRCH)
- goto error;
+ return err;
return delivered ? 0 : -ESRCH;
error:
kfree_skb(skb);
Patches currently in stable-queue which might be from nicolas.dichtel(a)6wind.com are
queue-4.15/netlink-avoid-a-double-skb-free-in-genlmsg_mcast.patch
This is a note to let you know that I've just added the patch titled
net: use skb_to_full_sk() in skb_update_prio()
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
net-use-skb_to_full_sk-in-skb_update_prio.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Wed Mar 28 18:37:51 CEST 2018
From: Eric Dumazet <edumazet(a)google.com>
Date: Wed, 14 Mar 2018 09:04:16 -0700
Subject: net: use skb_to_full_sk() in skb_update_prio()
From: Eric Dumazet <edumazet(a)google.com>
[ Upstream commit 4dcb31d4649df36297296b819437709f5407059c ]
Andrei Vagin reported a KASAN slab-out-of-bounds error in
skb_update_prio().
Since a SYNACK might be attached to a request socket, we need to
get back to the listener socket.
Since this listener is manipulated without locks, add const
qualifiers to sock_cgroup_prioidx() so that a const socket pointer
can also be used in skb_update_prio().
Also add the const qualifier to sock_cgroup_classid() for consistency.
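For context, the helper doing the work is skb_to_full_sk(); shown here as an
abridged sketch from the upstream headers, it maps a SYNACK's request socket
back to its listener, which is the socket that actually carries sk_cgrp_data:

static inline struct sock *sk_to_full_sk(struct sock *sk)
{
	/* request socks (TCP_NEW_SYN_RECV) point back to their listener */
	if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
		sk = inet_reqsk(sk)->rsk_listener;
	return sk;
}

static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
{
	return sk_to_full_sk(skb->sk);
}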
Fixes: ca6fb0651883 ("tcp: attach SYNACK messages to request sockets instead of listener")
Signed-off-by: Eric Dumazet <edumazet(a)google.com>
Reported-by: Andrei Vagin <avagin(a)virtuozzo.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
include/linux/cgroup-defs.h | 4 ++--
net/core/dev.c | 22 +++++++++++++++-------
2 files changed, 17 insertions(+), 9 deletions(-)
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -755,13 +755,13 @@ struct sock_cgroup_data {
* updaters and return part of the previous pointer as the prioidx or
* classid. Such races are short-lived and the result isn't critical.
*/
-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
/* fallback to 1 which is always the ID of the root cgroup */
return (skcd->is_data & 1) ? skcd->prioidx : 1;
}
-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
/* fallback to 0 which is the unconfigured default classid */
return (skcd->is_data & 1) ? skcd->classid : 0;
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3247,15 +3247,23 @@ static inline int __dev_xmit_skb(struct
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
- struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+ const struct netprio_map *map;
+ const struct sock *sk;
+ unsigned int prioidx;
- if (!skb->priority && skb->sk && map) {
- unsigned int prioidx =
- sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
+ if (skb->priority)
+ return;
+ map = rcu_dereference_bh(skb->dev->priomap);
+ if (!map)
+ return;
+ sk = skb_to_full_sk(skb);
+ if (!sk)
+ return;
- if (prioidx < map->priomap_len)
- skb->priority = map->priomap[prioidx];
- }
+ prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
+
+ if (prioidx < map->priomap_len)
+ skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
Patches currently in stable-queue which might be from edumazet(a)google.com are
queue-4.15/ipv6-fix-access-to-non-linear-packet-in-ndisc_fill_redirect_hdr_option.patch
queue-4.15/skbuff-fix-not-waking-applications-when-errors-are-enqueued.patch
queue-4.15/l2tp-do-not-accept-arbitrary-sockets.patch
queue-4.15/tcp-purge-write-queue-upon-aborting-the-connection.patch
queue-4.15/net-use-skb_to_full_sk-in-skb_update_prio.patch
queue-4.15/ieee802154-6lowpan-fix-possible-null-deref-in-lowpan_device_event.patch
This is a note to let you know that I've just added the patch titled
net: systemport: Rewrite __bcm_sysport_tx_reclaim()
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
net-systemport-rewrite-__bcm_sysport_tx_reclaim.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Wed Mar 28 18:37:51 CEST 2018
From: Florian Fainelli <f.fainelli(a)gmail.com>
Date: Tue, 13 Mar 2018 14:45:07 -0700
Subject: net: systemport: Rewrite __bcm_sysport_tx_reclaim()
From: Florian Fainelli <f.fainelli(a)gmail.com>
[ Upstream commit 484d802d0f2f29c335563fcac2a8facf174a1bbc ]
There is no need for complex checking between the last consumed index
and the current consumed index; a simple subtraction will do.
This also eliminates the possibility of a permanent transmit queue stall
under the following conditions:
- one CPU bursts ring->size worth of traffic (up to 256 buffers), to the
point where we run out of free descriptors, so we stop the transmit
queue at the end of bcm_sysport_xmit()
- because of our locking, we have the transmit process disable
interrupts which means we can be blocking the TX reclamation process
- when TX reclamation finally runs, we will be computing the difference
between ring->c_index (last consumed index by SW) and what the HW
reports through its register
- this register is masked with (ring->size - 1) = 0xff, which will lead
to stripping the upper bits of the index (register is 16-bits wide)
- we will be computing last_tx_cn as 0, which means there is no work to
be done, and we never wake-up the transmit queue, leaving it
permanently disabled
A practical example: ring->c_index (aka last_c_index) = 12, we
pushed 256 entries, the HW consumer index is 268, masking it with 0xff
gives 12, so last_tx_cn == 0 and nothing happens.
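The arithmetic is easy to check in isolation with a small userspace program (a
sketch; RING_CONS_INDEX_MASK is the driver's 16-bit consumer-index mask,
0xffff in bcmsysport.h):

#include <stdio.h>

#define RING_CONS_INDEX_MASK	0xffff	/* HW consumer index is 16 bits wide */

int main(void)
{
	unsigned int ring_size  = 256;
	unsigned int sw_c_index = 12;	/* ring->c_index: last index consumed by SW */
	unsigned int hw_c_index = 268;	/* consumer index reported by the HW register */

	/* old logic (the c_index >= last_c_index branch): truncate to ring_size - 1 first */
	unsigned int old_cn = (hw_c_index & (ring_size - 1)) - sw_c_index;

	/* new logic: plain 16-bit difference, wrap-around handled by the mask */
	unsigned int new_ready = (hw_c_index - sw_c_index) & RING_CONS_INDEX_MASK;

	printf("old last_tx_cn = %u, new txbds_ready = %u\n", old_cn, new_ready);
	/* prints: old last_tx_cn = 0, new txbds_ready = 256 */
	return 0;
}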
Fixes: 80105befdb4b ("net: systemport: add Broadcom SYSTEMPORT Ethernet MAC driver")
Signed-off-by: Florian Fainelli <f.fainelli(a)gmail.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/net/ethernet/broadcom/bcmsysport.c | 33 +++++++++++++----------------
drivers/net/ethernet/broadcom/bcmsysport.h | 2 -
2 files changed, 16 insertions(+), 19 deletions(-)
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(s
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
- unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
struct net_device *ndev = priv->netdev;
+ unsigned int txbds_processed = 0;
struct bcm_sysport_cb *cb;
+ unsigned int txbds_ready;
+ unsigned int c_index;
u32 hw_ind;
/* Clear status before servicing to reduce spurious interrupts */
@@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_rec
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
- ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
-
- last_c_index = ring->c_index;
- num_tx_cbs = ring->size;
-
- c_index &= (num_tx_cbs - 1);
-
- if (c_index >= last_c_index)
- last_tx_cn = c_index - last_c_index;
- else
- last_tx_cn = num_tx_cbs - last_c_index + c_index;
+ txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
netif_dbg(priv, tx_done, ndev,
- "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
- ring->index, c_index, last_tx_cn, last_c_index);
+ "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+ ring->index, ring->c_index, c_index, txbds_ready);
- while (last_tx_cn-- > 0) {
- cb = ring->cbs + last_c_index;
+ while (txbds_processed < txbds_ready) {
+ cb = &ring->cbs[ring->clean_index];
bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
ring->desc_count++;
- last_c_index++;
- last_c_index &= (num_tx_cbs - 1);
+ txbds_processed++;
+
+ if (likely(ring->clean_index < ring->size - 1))
+ ring->clean_index++;
+ else
+ ring->clean_index = 0;
}
u64_stats_update_begin(&priv->syncp);
@@ -1406,6 +1402,7 @@ static int bcm_sysport_init_tx_ring(stru
netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
ring->index = index;
ring->size = size;
+ ring->clean_index = 0;
ring->alloc_size = ring->size;
ring->desc_cpu = p;
ring->desc_count = ring->size;
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring {
unsigned int desc_count; /* Number of descriptors */
unsigned int curr_desc; /* Current descriptor */
unsigned int c_index; /* Last consumer index */
- unsigned int p_index; /* Current producer index */
+ unsigned int clean_index; /* Current clean index */
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
Patches currently in stable-queue which might be from f.fainelli(a)gmail.com are
queue-4.15/net-dsa-fix-dsa_is_user_port-test-inversion.patch
queue-4.15/net-systemport-rewrite-__bcm_sysport_tx_reclaim.patch
queue-4.15/sysfs-symlink-export-sysfs_create_link_nowarn.patch
queue-4.15/net-fec-fix-unbalanced-pm-runtime-calls.patch
queue-4.15/net-phy-relax-error-checking-when-creating-sysfs-link-netdev-phydev.patch
This is a note to let you know that I've just added the patch titled
net: phy: Tell caller result of phy_change()
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
net-phy-tell-caller-result-of-phy_change.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Wed Mar 28 18:37:51 CEST 2018
From: Brad Mouring <brad.mouring(a)ni.com>
Date: Thu, 8 Mar 2018 16:23:03 -0600
Subject: net: phy: Tell caller result of phy_change()
From: Brad Mouring <brad.mouring(a)ni.com>
[ Upstream commit a2c054a896b8ac794ddcfc7c92e2dc7ec4ed4ed5 ]
In 664fcf123a30e (net: phy: Threaded interrupts allow some simplification)
the phy_interrupt system was changed to use a traditional threaded
interrupt scheme instead of a workqueue approach.
With this change, the phy status check moved into phy_change, which
did not report back to the caller whether or not the interrupt was
handled. This means that, in the case of a shared phy interrupt,
only the first phydev's interrupt registers are checked (since
phy_interrupt() would always return IRQ_HANDLED). This leads to
interrupt storms when it is a secondary device that's actually the
interrupt source.
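Condensed from the diff below (a paraphrase, not the full function), the
interrupt handler now forwards phy_change()'s verdict instead of
unconditionally claiming the interrupt:

static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;	/* It can't be ours */

	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	/* IRQ_HANDLED only when this PHY really asserted the (possibly
	 * shared) line, IRQ_NONE otherwise, so secondary devices on the
	 * same interrupt line no longer cause the storm described above.
	 */
	return phy_change(phydev);
}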
Signed-off-by: Brad Mouring <brad.mouring(a)ni.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/net/phy/phy.c | 173 ++++++++++++++++++++++++--------------------------
include/linux/phy.h | 1
2 files changed, 86 insertions(+), 88 deletions(-)
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -615,6 +615,91 @@ static void phy_error(struct phy_device
}
/**
+ * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
+ * @phydev: target phy_device struct
+ */
+static int phy_disable_interrupts(struct phy_device *phydev)
+{
+ int err;
+
+ /* Disable PHY interrupts */
+ err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
+ if (err)
+ goto phy_err;
+
+ /* Clear the interrupt */
+ err = phy_clear_interrupt(phydev);
+ if (err)
+ goto phy_err;
+
+ return 0;
+
+phy_err:
+ phy_error(phydev);
+
+ return err;
+}
+
+/**
+ * phy_change - Called by the phy_interrupt to handle PHY changes
+ * @phydev: phy_device struct that interrupted
+ */
+static irqreturn_t phy_change(struct phy_device *phydev)
+{
+ if (phy_interrupt_is_valid(phydev)) {
+ if (phydev->drv->did_interrupt &&
+ !phydev->drv->did_interrupt(phydev))
+ goto ignore;
+
+ if (phy_disable_interrupts(phydev))
+ goto phy_err;
+ }
+
+ mutex_lock(&phydev->lock);
+ if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
+ phydev->state = PHY_CHANGELINK;
+ mutex_unlock(&phydev->lock);
+
+ if (phy_interrupt_is_valid(phydev)) {
+ atomic_dec(&phydev->irq_disable);
+ enable_irq(phydev->irq);
+
+ /* Reenable interrupts */
+ if (PHY_HALTED != phydev->state &&
+ phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
+ goto irq_enable_err;
+ }
+
+ /* reschedule state queue work to run as soon as possible */
+ phy_trigger_machine(phydev, true);
+ return IRQ_HANDLED;
+
+ignore:
+ atomic_dec(&phydev->irq_disable);
+ enable_irq(phydev->irq);
+ return IRQ_NONE;
+
+irq_enable_err:
+ disable_irq(phydev->irq);
+ atomic_inc(&phydev->irq_disable);
+phy_err:
+ phy_error(phydev);
+ return IRQ_NONE;
+}
+
+/**
+ * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
+ * @work: work_struct that describes the work to be done
+ */
+void phy_change_work(struct work_struct *work)
+{
+ struct phy_device *phydev =
+ container_of(work, struct phy_device, phy_queue);
+
+ phy_change(phydev);
+}
+
+/**
* phy_interrupt - PHY interrupt handler
* @irq: interrupt line
* @phy_dat: phy_device pointer
@@ -632,9 +717,7 @@ static irqreturn_t phy_interrupt(int irq
disable_irq_nosync(irq);
atomic_inc(&phydev->irq_disable);
- phy_change(phydev);
-
- return IRQ_HANDLED;
+ return phy_change(phydev);
}
/**
@@ -652,32 +735,6 @@ static int phy_enable_interrupts(struct
}
/**
- * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
- * @phydev: target phy_device struct
- */
-static int phy_disable_interrupts(struct phy_device *phydev)
-{
- int err;
-
- /* Disable PHY interrupts */
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
- if (err)
- goto phy_err;
-
- /* Clear the interrupt */
- err = phy_clear_interrupt(phydev);
- if (err)
- goto phy_err;
-
- return 0;
-
-phy_err:
- phy_error(phydev);
-
- return err;
-}
-
-/**
* phy_start_interrupts - request and enable interrupts for a PHY device
* @phydev: target phy_device struct
*
@@ -728,64 +785,6 @@ int phy_stop_interrupts(struct phy_devic
EXPORT_SYMBOL(phy_stop_interrupts);
/**
- * phy_change - Called by the phy_interrupt to handle PHY changes
- * @phydev: phy_device struct that interrupted
- */
-void phy_change(struct phy_device *phydev)
-{
- if (phy_interrupt_is_valid(phydev)) {
- if (phydev->drv->did_interrupt &&
- !phydev->drv->did_interrupt(phydev))
- goto ignore;
-
- if (phy_disable_interrupts(phydev))
- goto phy_err;
- }
-
- mutex_lock(&phydev->lock);
- if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
- phydev->state = PHY_CHANGELINK;
- mutex_unlock(&phydev->lock);
-
- if (phy_interrupt_is_valid(phydev)) {
- atomic_dec(&phydev->irq_disable);
- enable_irq(phydev->irq);
-
- /* Reenable interrupts */
- if (PHY_HALTED != phydev->state &&
- phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
- goto irq_enable_err;
- }
-
- /* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev, true);
- return;
-
-ignore:
- atomic_dec(&phydev->irq_disable);
- enable_irq(phydev->irq);
- return;
-
-irq_enable_err:
- disable_irq(phydev->irq);
- atomic_inc(&phydev->irq_disable);
-phy_err:
- phy_error(phydev);
-}
-
-/**
- * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
- * @work: work_struct that describes the work to be done
- */
-void phy_change_work(struct work_struct *work)
-{
- struct phy_device *phydev =
- container_of(work, struct phy_device, phy_queue);
-
- phy_change(phydev);
-}
-
-/**
* phy_stop - Bring down the PHY link, and stop checking the status
* @phydev: target phy_device struct
*/
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -897,7 +897,6 @@ int phy_driver_register(struct phy_drive
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_state_machine(struct work_struct *work);
-void phy_change(struct phy_device *phydev);
void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev, int new_link);
void phy_start_machine(struct phy_device *phydev);
Patches currently in stable-queue which might be from brad.mouring(a)ni.com are
queue-4.15/net-phy-tell-caller-result-of-phy_change.patch
This is a note to let you know that I've just added the patch titled
net: phy: relax error checking when creating sysfs link netdev->phydev
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
net-phy-relax-error-checking-when-creating-sysfs-link-netdev-phydev.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Wed Mar 28 18:37:51 CEST 2018
From: Grygorii Strashko <grygorii.strashko(a)ti.com>
Date: Fri, 16 Mar 2018 17:08:35 -0500
Subject: net: phy: relax error checking when creating sysfs link netdev->phydev
From: Grygorii Strashko <grygorii.strashko(a)ti.com>
[ Upstream commit 4414b3ed74be0e205e04e12cd83542a727d88255 ]
Some Ethernet drivers (like TI CPSW) may connect and manage more than one
PHY per netdevice. As a result, such drivers produce a warning during system
boot and fail to connect the second PHY to the netdevice when the PHYLIB
framework tries to create the sysfs link netdev->phydev for the second PHY
in phy_attach_direct(), because a sysfs link with the same name has already
been created for the first PHY. As a result, the second CPSW external
port becomes unusable.
Fix it by relaxing the error checking when the PHYLIB framework creates the
sysfs link netdev->phydev in phy_attach_direct(): suppress the warning by
using sysfs_create_link_nowarn() and emit an error message instead.
After this change, failure to create the links (phy->netdev and netdev->phy)
is no longer fatal and the system can continue working, which fixes the
TI CPSW issue.
Cc: Florian Fainelli <f.fainelli(a)gmail.com>
Cc: Andrew Lunn <andrew(a)lunn.ch>
Fixes: a3995460491d ("net: phy: Relax error checking on sysfs_create_link()")
Signed-off-by: Grygorii Strashko <grygorii.strashko(a)ti.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/net/phy/phy_device.c | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -999,10 +999,17 @@ int phy_attach_direct(struct net_device
err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
"attached_dev");
if (!err) {
- err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj,
- "phydev");
- if (err)
- goto error;
+ err = sysfs_create_link_nowarn(&dev->dev.kobj,
+ &phydev->mdio.dev.kobj,
+ "phydev");
+ if (err) {
+ dev_err(&dev->dev, "could not add device link to %s err %d\n",
+ kobject_name(&phydev->mdio.dev.kobj),
+ err);
+ /* non-fatal - some net drivers can use one netdevice
+ * with more then one phy
+ */
+ }
phydev->sysfs_links = true;
}
Patches currently in stable-queue which might be from grygorii.strashko(a)ti.com are
queue-4.15/sysfs-symlink-export-sysfs_create_link_nowarn.patch
queue-4.15/net-ethernet-ti-cpsw-add-check-for-in-band-mode-setting-with-rgmii-phy-interface.patch
queue-4.15/net-phy-relax-error-checking-when-creating-sysfs-link-netdev-phydev.patch
This is a note to let you know that I've just added the patch titled
net: Only honor ifindex in IP_PKTINFO if non-0
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
net-only-honor-ifindex-in-ip_pktinfo-if-non-0.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Wed Mar 28 18:37:51 CEST 2018
From: David Ahern <dsahern(a)gmail.com>
Date: Fri, 16 Feb 2018 11:03:03 -0800
Subject: net: Only honor ifindex in IP_PKTINFO if non-0
From: David Ahern <dsahern(a)gmail.com>
[ Upstream commit 2cbb4ea7de167b02ffa63e9cdfdb07a7e7094615 ]
Only allow ifindex from IP_PKTINFO to override SO_BINDTODEVICE settings
if the index is actually set in the message.
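From userspace, the affected path is a sendmsg() carrying an IP_PKTINFO
control message. A minimal sketch (hypothetical helper, not from the patch)
that pins the source address while leaving ipi_ifindex at 0 - which, with this
fix, no longer overrides an earlier SO_BINDTODEVICE binding on the socket:

#define _GNU_SOURCE
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_with_pktinfo(int fd, const void *buf, size_t len,
				 const struct sockaddr_in *dst, struct in_addr src)
{
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= (void *)dst,
		.msg_namelen	= sizeof(*dst),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct in_pktinfo pi = {
		.ipi_ifindex	= 0,	/* 0 = "not set": keep the socket's bound device */
		.ipi_spec_dst	= src,	/* preferred source address for this datagram */
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	memset(cbuf, 0, sizeof(cbuf));
	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type  = IP_PKTINFO;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(pi));
	memcpy(CMSG_DATA(cmsg), &pi, sizeof(pi));

	return sendmsg(fd, &msg, 0);
}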
Signed-off-by: David Ahern <dsahern(a)gmail.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
net/ipv4/ip_sockglue.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -258,7 +258,8 @@ int ip_cmsg_send(struct sock *sk, struct
src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
return -EINVAL;
- ipc->oif = src_info->ipi6_ifindex;
+ if (src_info->ipi6_ifindex)
+ ipc->oif = src_info->ipi6_ifindex;
ipc->addr = src_info->ipi6_addr.s6_addr32[3];
continue;
}
@@ -288,7 +289,8 @@ int ip_cmsg_send(struct sock *sk, struct
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
return -EINVAL;
info = (struct in_pktinfo *)CMSG_DATA(cmsg);
- ipc->oif = info->ipi_ifindex;
+ if (info->ipi_ifindex)
+ ipc->oif = info->ipi_ifindex;
ipc->addr = info->ipi_spec_dst.s_addr;
break;
}
Patches currently in stable-queue which might be from dsahern(a)gmail.com are
queue-4.15/net-only-honor-ifindex-in-ip_pktinfo-if-non-0.patch
queue-4.15/ipv6-reflect-mtu-changes-on-pmtu-of-exceptions-for-mtu-less-routes.patch
This is a note to let you know that I've just added the patch titled
net/iucv: Free memory obtained by kzalloc
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
net-iucv-free-memory-obtained-by-kzalloc.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Wed Mar 28 18:37:51 CEST 2018
From: Arvind Yadav <arvind.yadav.cs(a)gmail.com>
Date: Tue, 13 Mar 2018 16:50:06 +0100
Subject: net/iucv: Free memory obtained by kzalloc
From: Arvind Yadav <arvind.yadav.cs(a)gmail.com>
[ Upstream commit fa6a91e9b907231d2e38ea5ed89c537b3525df3d ]
Free memory by calling put_device() if afiucv_iucv_init() is not
successful.
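The underlying rule (a generic sketch, not code from this patch): once
device_register() has been called, the device's embedded kobject owns a
reference, so even on failure the cleanup must go through put_device()
rather than a direct kfree():

	err = device_register(dev);
	if (err) {
		/* device_register() already initialized the kobject and took
		 * a reference; put_device() drops it and lets the device's
		 * release() method free the memory. Never kfree(dev) here.
		 */
		put_device(dev);
		return err;
	}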
Signed-off-by: Arvind Yadav <arvind.yadav.cs(a)gmail.com>
Reviewed-by: Cornelia Huck <cohuck(a)redhat.com>
Signed-off-by: Ursula Braun <ursula.braun(a)de.ibm.com>
Signed-off-by: Julian Wiedmann <jwi(a)linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
net/iucv/af_iucv.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -2433,9 +2433,11 @@ static int afiucv_iucv_init(void)
af_iucv_dev->driver = &af_iucv_driver;
err = device_register(af_iucv_dev);
if (err)
- goto out_driver;
+ goto out_iucv_dev;
return 0;
+out_iucv_dev:
+ put_device(af_iucv_dev);
out_driver:
driver_unregister(&af_iucv_driver);
out_iucv:
Patches currently in stable-queue which might be from arvind.yadav.cs(a)gmail.com are
queue-4.15/net-iucv-free-memory-obtained-by-kzalloc.patch