This is a note to let you know that I've just added the patch titled
USB: serial: f81232: fix interrupt worker not stop
to my usb git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
in the usb-testing branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week).
The patch will be merged into the usb-next branch sometime soon,
after it passes testing and the merge window opens.
If you have any questions about this process, please let me know.
From 804dbee1e49774918339c1e5a87400988c0819e8 Mon Sep 17 00:00:00 2001
From: "Ji-Ze Hong (Peter Hong)" <hpeter(a)gmail.com>
Date: Tue, 30 Apr 2019 09:22:29 +0800
Subject: USB: serial: f81232: fix interrupt worker not stop
The F81232 uses an interrupt worker to handle MSR changes.
This patch fixes the issue that the interrupt worker was not stopped
in close() and suspend().
This also fixes line-status events being disabled after a suspend cycle
until the port is re-opened.
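Note that usb_kill_urb() only guarantees that the URB's completion
handler is no longer running; work that the handler has already queued
can still execute afterwards, so the worker must be flushed as well.
The required teardown ordering, as a sketch (the concrete code is in
the diff below):

	usb_kill_urb(port->interrupt_in_urb);	/* no new completions */
	flush_work(&port_priv->interrupt_work);	/* drain queued work  */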
Signed-off-by: Ji-Ze Hong (Peter Hong) <hpeter+linux_kernel@gmail.com>
[ johan: amend commit message ]
Fixes: 87fe5adcd8de ("USB: f81232: implement read IIR/MSR with endpoint")
Cc: stable <stable@vger.kernel.org> # 4.1
Signed-off-by: Johan Hovold <johan@kernel.org>
---
drivers/usb/serial/f81232.c | 39 +++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 0dcdcb4b2cde..dee6f2caf9b5 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -556,9 +556,12 @@ static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
static void f81232_close(struct usb_serial_port *port)
{
+ struct f81232_private *port_priv = usb_get_serial_port_data(port);
+
f81232_port_disable(port);
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
+ flush_work(&port_priv->interrupt_work);
}
static void f81232_dtr_rts(struct usb_serial_port *port, int on)
@@ -632,6 +635,40 @@ static int f81232_port_remove(struct usb_serial_port *port)
return 0;
}
+static int f81232_suspend(struct usb_serial *serial, pm_message_t message)
+{
+ struct usb_serial_port *port = serial->port[0];
+ struct f81232_private *port_priv = usb_get_serial_port_data(port);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
+ usb_kill_urb(port->read_urbs[i]);
+
+ usb_kill_urb(port->interrupt_in_urb);
+
+ if (port_priv)
+ flush_work(&port_priv->interrupt_work);
+
+ return 0;
+}
+
+static int f81232_resume(struct usb_serial *serial)
+{
+ struct usb_serial_port *port = serial->port[0];
+ int result;
+
+ if (tty_port_initialized(&port->port)) {
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+ if (result) {
+ dev_err(&port->dev, "submit interrupt urb failed: %d\n",
+ result);
+ return result;
+ }
+ }
+
+ return usb_serial_generic_resume(serial);
+}
+
static struct usb_serial_driver f81232_device = {
.driver = {
.owner = THIS_MODULE,
@@ -655,6 +692,8 @@ static struct usb_serial_driver f81232_device = {
.read_int_callback = f81232_read_int_callback,
.port_probe = f81232_port_probe,
.port_remove = f81232_port_remove,
+ .suspend = f81232_suspend,
+ .resume = f81232_resume,
};
static struct usb_serial_driver * const serial_drivers[] = {
--
2.21.0
During my rwsem testing, it was found that after a down_read(), the
reader count may occasionally become 0 or even negative. Consequently,
a writer may steal the lock at that time and execute in parallel with
the reader, thus breaking the mutual exclusion guarantee of the write
lock. In other words, both readers and a writer can become rwsem owners
simultaneously.
The current reader wakeup code does it in one pass: it clears
waiter->task and puts the waiters into wake_q before fully incrementing
the reader count. Once waiter->task is cleared, the corresponding
reader may see it, finish its critical section, and unlock, decrementing
the count before it has been incremented. This is not a problem if
there is only one reader to wake up, as the count has been
pre-incremented by 1. It is a problem when more than one reader is to
be woken up, since a writer can then steal the lock.
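A hypothetical interleaving illustrating the race (an illustrative
sketch, not taken from the test report):

	waker in __rwsem_mark_wake()        woken reader A
	----------------------------        --------------
	clears waiter->task of reader A
	                                    runs, finishes its critical
	                                    section, calls up_read()
	                                    -> reader count decremented
	clears waiter->task of reader B
	adds woken * RWSEM_ACTIVE_READ_BIAS
	to the count (too late)

Between reader A's decrement and the final increment, the count can
drop to 0 (or below), at which point a writer's attempt to grab the
lock can succeed even though reader B is about to enter its critical
section.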
The wakeup was actually done in 2 passes before the v4.9 commit
70800c3c0cc5 ("locking/rwsem: Scan the wait_list for readers only
once"). To fix this problem, the wakeup is now done in two passes
again. In the first pass, we collect the readers and count them. The
reader count is then fully incremented. In the second pass,
waiter->task is cleared and the waiters are put into wake_q to be
woken up later.
Fixes: 70800c3c0cc5 ("locking/rwsem: Scan the wait_list for readers only once")
Cc: <stable@vger.kernel.org>
Signed-off-by: Waiman Long <longman@redhat.com>
---
kernel/locking/rwsem-xadd.c | 46 +++++++++++++++++++++++++------------
1 file changed, 31 insertions(+), 15 deletions(-)
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 6b3ee9948bf1..0b1f77957240 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -130,6 +130,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
{
struct rwsem_waiter *waiter, *tmp;
long oldcount, woken = 0, adjustment = 0;
+ struct list_head wlist;
/*
* Take a peek at the queue head waiter such that we can determine
@@ -188,18 +189,43 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
* of the queue. We know that woken will be at least 1 as we accounted
* for above. Note we increment the 'active part' of the count by the
* number of readers before waking any processes up.
+ *
+ * We have to do wakeup in 2 passes to prevent the possibility that
+ * the reader count may be decremented before it is incremented. It
+ * is because the to-be-woken waiter may not have slept yet. So it
+ * may see waiter->task got cleared, finish its critical section and
+ * do an unlock before the reader count increment.
+ *
+ * 1) Collect the read-waiters in a separate list, count them and
+ * fully increment the reader count in rwsem.
+ * 2) For each waiter in the new list, clear waiter->task and
+ * put them into wake_q to be woken up later.
*/
- list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
- struct task_struct *tsk;
-
+ list_for_each_entry(waiter, &sem->wait_list, list) {
if (waiter->type == RWSEM_WAITING_FOR_WRITE)
break;
woken++;
- tsk = waiter->task;
+ }
+ list_cut_before(&wlist, &sem->wait_list, &waiter->list);
+
+ adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+ lockevent_cond_inc(rwsem_wake_reader, woken);
+ if (list_empty(&sem->wait_list)) {
+ /* hit end of list above */
+ adjustment -= RWSEM_WAITING_BIAS;
+ }
+
+ if (adjustment)
+ atomic_long_add(adjustment, &sem->count);
+
+ /* 2nd pass */
+ list_for_each_entry_safe(waiter, tmp, &wlist, list) {
+ struct task_struct *tsk;
+ tsk = waiter->task;
get_task_struct(tsk);
- list_del(&waiter->list);
+
/*
* Ensure calling get_task_struct() before setting the reader
* waiter to nil such that rwsem_down_read_failed() cannot
@@ -213,16 +239,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
*/
wake_q_add_safe(wake_q, tsk);
}
-
- adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
- lockevent_cond_inc(rwsem_wake_reader, woken);
- if (list_empty(&sem->wait_list)) {
- /* hit end of list above */
- adjustment -= RWSEM_WAITING_BIAS;
- }
-
- if (adjustment)
- atomic_long_add(adjustment, &sem->count);
}
/*
--
2.18.1
Changes since v5 [1]:
- Rebase on next-20190416 and the new 'struct mhp_restrictions'
infrastructure.
- Extend mhp_restrictions to the 'remove' case so the sub-section policy
can be clarified with respect to the memblock-api in a symmetric
manner with the 'add' case.
- Kill is_dev_zone() since cleanups have now made it moot
[1]: https://lwn.net/Articles/783808/
---
The memory hotplug section is an arbitrary / convenient unit for memory
hotplug. 'Section-size' units have bled into the user interface
('memblock' sysfs) and cannot be changed without breaking existing
userspace. The section-size constraint, while mostly benign for typical
memory hotplug, has wreaked, and continues to wreak, havoc with
'device-memory' use cases, persistent memory (pmem) in particular.
Recall that pmem uses
devm_memremap_pages(), and subsequently arch_add_memory(), to allocate a
'struct page' memmap for pmem. However, it does not use the 'bottom
half' of memory hotplug, i.e. never marks pmem pages online and never
exposes the userspace memblock interface for pmem. This leaves an
opening to redress the section-size constraint.
To date, the libnvdimm subsystem has attempted to inject padding to
satisfy the internal constraints of arch_add_memory(). Beyond
complicating the code, leading to bugs [2], wasting memory, and limiting
configuration flexibility, the padding hack is broken when the platform
changes the physical memory alignment of pmem from one boot to the
next. Device failure (intermittent or permanent) and physical
reconfiguration are events that can cause the platform firmware to
change the physical placement of pmem on a subsequent boot, and device
failure is an everyday event in a data-center.
It turns out that sections are only a hard requirement of the
user-facing interface for memory hotplug, and with a bit more
infrastructure, sub-section arch_add_memory() support can be added for
kernel-internal usages like devm_memremap_pages(). Here is an analysis
of the design assumptions in the current code and how they are
addressed in the new implementation:
Current design assumptions:
- Sections that describe boot memory (early sections) are never
unplugged / removed.
- pfn_valid(), in the CONFIG_SPARSEMEM_VMEMMAP=y case, devolves to a
valid_section() check
- __add_pages() and helper routines assume all operations occur in
PAGES_PER_SECTION units.
- The memblock sysfs interface only comprehends full sections
New design assumptions:
- Sections are instrumented with a sub-section bitmask to track (on x86)
individual 2MB sub-divisions of a 128MB section; a sketch of this
arithmetic follows the list.
- Partially populated early sections can be extended with additional
sub-sections, and those sub-sections can be removed with
arch_remove_memory(). With this in place we no longer lose usable memory
capacity to padding.
- pfn_valid() is updated to look deeper than valid_section() to also check the
active-sub-section mask. This indication is in the same cacheline as
the valid_section() so the performance impact is expected to be
negligible. So far the lkp robot has not reported any regressions.
- Outside of the core vmemmap population routines which are replaced,
other helper routines like shrink_{zone,pgdat}_span() are updated to
handle the smaller granularity. Core memory hotplug routines that deal
with online memory are not touched.
- The existing memblock sysfs user api guarantees / assumptions are
not touched since this capability is limited to !online
!memblock-sysfs-accessible sections.
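A minimal sketch of the sub-section arithmetic referenced in the first
item above (x86_64 numbers; the helper names are illustrative, not
necessarily the kernel's):

	#define PAGE_SHIFT		12
	#define SECTION_SIZE_BITS	27	/* 128MB section */
	#define SUBSECTION_SHIFT	21	/* 2MB sub-section */
	#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
	#define PFN_SUBSECTION_SHIFT	(SUBSECTION_SHIFT - PAGE_SHIFT)
	/* 2^(27 - 21) = 64 sub-sections: the mask fits one unsigned long */
	#define SUBSECTIONS_PER_SECTION \
		(1UL << (PFN_SECTION_SHIFT - PFN_SUBSECTION_SHIFT))

	/* index of the sub-section covering pfn, within its section */
	static inline unsigned long subsection_index(unsigned long pfn)
	{
		return (pfn & ((1UL << PFN_SECTION_SHIFT) - 1))
				>> PFN_SUBSECTION_SHIFT;
	}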
Meanwhile the issue reports continue to roll in from users who do not
understand when and how the 128MB constraint will bite them. The current
implementation relied on being able to support at least one misaligned
namespace, but that immediately falls over on any moderately complex
namespace creation attempt. Beyond the initial problem of 'System RAM'
colliding with pmem, and the unsolvable problem of physical alignment
changes, Linux is now being exposed to platforms that collide pmem
ranges with other pmem ranges by default [3]. In short,
devm_memremap_pages() has pushed the venerable section-size constraint
past the breaking point, and the simplicity of section-aligned
arch_add_memory() is no longer tenable.
These patches are exposed to the kbuild robot on my libnvdimm-pending
branch [4], and a preview of the unit test for this functionality is
available on the 'subsection-pending' branch of ndctl [5].
[2]: https://lore.kernel.org/r/155000671719.348031.2347363160141119237.stgit@dwi…
[3]: https://github.com/pmem/ndctl/issues/76
[4]: https://git.kernel.org/pub/scm/linux/kernel/git/djbw/nvdimm.git/log/?h=libn…
[5]: https://github.com/pmem/ndctl/commit/7c59b4867e1c
---
Dan Williams (12):
mm/sparsemem: Introduce struct mem_section_usage
mm/sparsemem: Introduce common definitions for the size and mask of a section
mm/sparsemem: Add helpers track active portions of a section at boot
mm/hotplug: Prepare shrink_{zone,pgdat}_span for sub-section removal
mm/sparsemem: Convert kmalloc_section_memmap() to populate_section_memmap()
mm/hotplug: Add mem-hotplug restrictions for remove_memory()
mm: Kill is_dev_zone() helper
mm/sparsemem: Prepare for sub-section ranges
mm/sparsemem: Support sub-section hotplug
mm/devm_memremap_pages: Enable sub-section remap
libnvdimm/pfn: Fix fsdax-mode namespace info-block zero-fields
libnvdimm/pfn: Stop padding pmem namespaces to section alignment
arch/ia64/mm/init.c | 4
arch/powerpc/mm/mem.c | 5 -
arch/s390/mm/init.c | 2
arch/sh/mm/init.c | 4
arch/x86/mm/init_32.c | 4
arch/x86/mm/init_64.c | 9 +
drivers/nvdimm/dax_devs.c | 2
drivers/nvdimm/pfn.h | 12 -
drivers/nvdimm/pfn_devs.c | 93 +++-------
include/linux/memory_hotplug.h | 12 +
include/linux/mm.h | 4
include/linux/mmzone.h | 72 ++++++--
kernel/memremap.c | 70 +++-----
mm/hmm.c | 2
mm/memory_hotplug.c | 148 +++++++++-------
mm/page_alloc.c | 8 +
mm/sparse-vmemmap.c | 21 ++
mm/sparse.c | 371 +++++++++++++++++++++++++++-------------
18 files changed, 503 insertions(+), 340 deletions(-)
Since commit 1e434b703248 ("ARM: imx: update the cpu power up timing
setting on i.mx6sx"), character loss has been observed on the i.MX6ULL
UART, as reported by Christoph Niedermaier.
The intention of that commit was to increase the SW2ISO field for
i.MX6SX only, but since cpuidle-imx6sx is also used on
i.MX6UL/i.MX6ULL, this caused unintended side effects on other SoCs.
Fix this problem by keeping the original SW2ISO value for
i.MX6UL/i.MX6ULL and only increasing SW2ISO in the i.MX6SX case.
Cc: stable@vger.kernel.org
Fixes: 1e434b703248 ("ARM: imx: update the cpu power up timing setting on i.mx6sx")
Reported-by: Christoph Niedermaier <cniedermaier@dh-electronics.com>
Signed-off-by: Fabio Estevam <festevam@gmail.com>
---
arch/arm/mach-imx/cpuidle-imx6sx.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index fd0053e47a15..57cb9c763222 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -15,6 +15,7 @@
#include "common.h"
#include "cpuidle.h"
+#include "hardware.h"
static int imx6sx_idle_finish(unsigned long val)
{
@@ -99,8 +100,12 @@ static struct cpuidle_driver imx6sx_cpuidle_driver = {
.safe_state_index = 0,
};
+#define SW2ISO_ORIGINAL 0x2
+#define SW2ISO_IMX6SX 0xf
int __init imx6sx_cpuidle_init(void)
{
+ u32 sw2iso = SW2ISO_ORIGINAL;
+
imx6_set_int_mem_clk_lpm(true);
imx6_enable_rbc(false);
imx_gpc_set_l2_mem_power_in_lpm(false);
@@ -110,7 +115,9 @@ int __init imx6sx_cpuidle_init(void)
* except for power up sw2iso which need to be
* larger than LDO ramp up time.
*/
- imx_gpc_set_arm_power_up_timing(0xf, 1);
+ if (cpu_is_imx6sx())
+ sw2iso = SW2ISO_IMX6SX;
+ imx_gpc_set_arm_power_up_timing(sw2iso, 1);
imx_gpc_set_arm_power_down_timing(1, 1);
return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
--
2.17.1
This is a note to let you know that I've just added the patch titled
staging: most: cdev: fix chrdev_region leak in mod_exit
to my staging git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
in the staging-next branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week).
The patch will also be merged in the next major kernel release
during the merge window.
If you have any questions about this process, please let me know.
From af708900e9a48c0aa46070c8a8cdf0608a1d2025 Mon Sep 17 00:00:00 2001
From: Suresh Udipi <sudipi@jp.adit-jv.com>
Date: Wed, 24 Apr 2019 21:23:43 +0200
Subject: staging: most: cdev: fix chrdev_region leak in mod_exit
It looks like the v4.18-rc1 commit [0], which upstreams mld-1.8.0
commit [1], missed fixing the memory leak in the mod_exit() function.
Do it now.
[0] aba258b7310167 ("staging: most: cdev: fix chrdev_region leak")
[1] https://github.com/microchip-ais/linux/commit/a2d8f7ae7ea381
("staging: most: cdev: fix leak for chrdev_region")
Signed-off-by: Suresh Udipi <sudipi@jp.adit-jv.com>
Signed-off-by: Eugeniu Rosca <erosca@de.adit-jv.com>
Acked-by: Christian Gromm <christian.gromm@microchip.com>
Fixes: aba258b73101 ("staging: most: cdev: fix chrdev_region leak")
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
drivers/staging/most/cdev/cdev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index d98977c57a4b..d0cc0b746107 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -555,7 +555,7 @@ static void __exit mod_exit(void)
destroy_cdev(c);
destroy_channel(c);
}
- unregister_chrdev_region(comp.devno, 1);
+ unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
ida_destroy(&comp.minor_id);
class_destroy(comp.class);
}
--
2.21.0