The patch titled
Subject: mm: initialize struct pages in reserved regions outside of the zone ranges
has been removed from the -mm tree. Its filename was
mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges.patch
This patch was dropped because an updated version will be merged
------------------------------------------------------
From: Andrea Arcangeli <aarcange(a)redhat.com>
Subject: mm: initialize struct pages in reserved regions outside of the zone ranges
Without this change, the pfn 0 isn't in any zone spanned range, and it's
also not in any memory.memblock range, so the struct page of pfn 0 wasn't
initialized and the PagePoison remained set when reserve_bootmem_region
called __SetPageReserved, inducing a silent boot failure with DEBUG_VM
(and correctly so, because the crash signaled the nodeid/nid of pfn 0
would be again wrong).
There's no enforcement that all memblock.reserved ranges must overlap
memblock.memory ranges, so the memblock.reserved ranges also require an
explicit initialization and the zones ranges need to be extended to
include all memblock.reserved ranges with struct pages too or they'll be
left uninitialized with PagePoison as it happened to pfn 0.
Link: https://lkml.kernel.org/r/20201205013238.21663-2-aarcange@redhat.com
Fixes: 73a6e474cb37 ("mm: memmap_init: iterate over memblock regions rather that check each PFN")
Signed-off-by: Andrea Arcangeli <aarcange(a)redhat.com>
Cc: Mike Rapoport <rppt(a)linux.ibm.com>
Cc: Baoquan He <bhe(a)redhat.com>
Cc: David Hildenbrand <david(a)redhat.com>
Cc: Mel Gorman <mgorman(a)suse.de>
Cc: Michal Hocko <mhocko(a)kernel.org>
Cc: Qian Cai <cai(a)lca.pw>
Cc: Vlastimil Babka <vbabka(a)suse.cz>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
include/linux/memblock.h | 17 ++++++++---
mm/debug.c | 3 +
mm/memblock.c | 4 +-
mm/page_alloc.c | 57 +++++++++++++++++++++++++++++--------
4 files changed, 63 insertions(+), 18 deletions(-)
--- a/include/linux/memblock.h~mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges
+++ a/include/linux/memblock.h
@@ -251,7 +251,8 @@ static inline bool memblock_is_nomap(str
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
- unsigned long *out_end_pfn, int *out_nid);
+ unsigned long *out_end_pfn, int *out_nid,
+ struct memblock_type *type);
/**
* for_each_mem_pfn_range - early memory pfn range iterator
@@ -263,9 +264,17 @@ void __next_mem_pfn_range(int *idx, int
*
* Walks over configured memory ranges.
*/
-#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
- for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
- i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
+ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.memory); \
+ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.memory))
+
+#define for_each_res_pfn_range(i, nid, p_start, p_end, p_nid) \
+ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.reserved); \
+ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.reserved))
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
--- a/mm/debug.c~mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges
+++ a/mm/debug.c
@@ -64,7 +64,8 @@ void __dump_page(struct page *page, cons
* dump_page() when detected.
*/
if (page_poisoned) {
- pr_warn("page:%px is uninitialized and poisoned", page);
+ pr_warn("page:%px pfn:%ld is uninitialized and poisoned",
+ page, page_to_pfn(page));
goto hex_only;
}
--- a/mm/memblock.c~mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges
+++ a/mm/memblock.c
@@ -1198,9 +1198,9 @@ void __init_memblock __next_mem_range_re
*/
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
unsigned long *out_start_pfn,
- unsigned long *out_end_pfn, int *out_nid)
+ unsigned long *out_end_pfn, int *out_nid,
+ struct memblock_type *type)
{
- struct memblock_type *type = &memblock.memory;
struct memblock_region *r;
int r_nid;
--- a/mm/page_alloc.c~mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges
+++ a/mm/page_alloc.c
@@ -1458,6 +1458,7 @@ static void __meminit init_reserved_page
{
pg_data_t *pgdat;
int nid, zid;
+ bool found = false;
if (!early_page_uninitialised(pfn))
return;
@@ -1468,10 +1469,15 @@ static void __meminit init_reserved_page
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
struct zone *zone = &pgdat->node_zones[zid];
- if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
+ if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) {
+ found = true;
break;
+ }
}
- __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
+ if (likely(found))
+ __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
+ else
+ WARN_ON_ONCE(1);
}
#else
static inline void init_reserved_page(unsigned long pfn)
@@ -6227,7 +6233,7 @@ void __init __weak memmap_init(unsigned
unsigned long zone,
unsigned long range_start_pfn)
{
- unsigned long start_pfn, end_pfn, next_pfn = 0;
+ unsigned long start_pfn, end_pfn, prev_pfn = 0;
unsigned long range_end_pfn = range_start_pfn + size;
u64 pgcnt = 0;
int i;
@@ -6235,7 +6241,7 @@ void __init __weak memmap_init(unsigned
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
- next_pfn = clamp(next_pfn, range_start_pfn, range_end_pfn);
+ prev_pfn = clamp(prev_pfn, range_start_pfn, range_end_pfn);
if (end_pfn > start_pfn) {
size = end_pfn - start_pfn;
@@ -6243,10 +6249,10 @@ void __init __weak memmap_init(unsigned
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
}
- if (next_pfn < start_pfn)
- pgcnt += init_unavailable_range(next_pfn, start_pfn,
+ if (prev_pfn < start_pfn)
+ pgcnt += init_unavailable_range(prev_pfn, start_pfn,
zone, nid);
- next_pfn = end_pfn;
+ prev_pfn = end_pfn;
}
/*
@@ -6256,12 +6262,31 @@ void __init __weak memmap_init(unsigned
* considered initialized. Make sure that memmap has a well defined
* state.
*/
- if (next_pfn < range_end_pfn)
- pgcnt += init_unavailable_range(next_pfn, range_end_pfn,
+ if (prev_pfn < range_end_pfn)
+ pgcnt += init_unavailable_range(prev_pfn, range_end_pfn,
zone, nid);
+ /*
+ * memblock.reserved isn't enforced to overlap with
+ * memblock.memory so initialize the struct pages for
+ * memblock.reserved too in case it wasn't overlapping.
+ *
+ * If any struct page associated with a memblock.reserved
+ * range isn't overlapping with a zone range, it'll be left
+ * uninitialized, ideally with PagePoison, and it'll be a more
+ * easily detectable error.
+ */
+ for_each_res_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+ end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
+
+ if (end_pfn > start_pfn)
+ pgcnt += init_unavailable_range(start_pfn, end_pfn,
+ zone, nid);
+ }
+
if (pgcnt)
- pr_info("%s: Zeroed struct page in unavailable ranges: %lld\n",
+ pr_info("%s: pages in unavailable ranges: %lld\n",
zone_names[zone], pgcnt);
}
@@ -6499,6 +6524,10 @@ void __init get_pfn_range_for_nid(unsign
*start_pfn = min(*start_pfn, this_start_pfn);
*end_pfn = max(*end_pfn, this_end_pfn);
}
+ for_each_res_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+ *start_pfn = min(*start_pfn, this_start_pfn);
+ *end_pfn = max(*end_pfn, this_end_pfn);
+ }
if (*start_pfn == -1UL)
*start_pfn = 0;
@@ -7126,7 +7155,13 @@ unsigned long __init node_map_pfn_alignm
*/
unsigned long __init find_min_pfn_with_active_regions(void)
{
- return PHYS_PFN(memblock_start_of_DRAM());
+ /*
+ * reserved regions must be included so that their page
+ * structure can be part of a zone and obtain a valid zoneid
+ * before __SetPageReserved().
+ */
+ return min(PHYS_PFN(memblock_start_of_DRAM()),
+ PHYS_PFN(memblock.reserved.regions[0].base));
}
/*
_
Patches currently in -mm which might be from aarcange(a)redhat.com are
The patch titled
Subject: mm: initialize struct pages in reserved regions outside of the zone ranges
has been added to the -mm tree. Its filename is
mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges.patch
This patch should soon appear at
https://ozlabs.org/~akpm/mmots/broken-out/mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges.patch
and later at
https://ozlabs.org/~akpm/mmotm/broken-out/mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges.patch
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Andrea Arcangeli <aarcange(a)redhat.com>
Subject: mm: initialize struct pages in reserved regions outside of the zone ranges
Without this change, the pfn 0 isn't in any zone spanned range, and it's
also not in any memory.memblock range, so the struct page of pfn 0 wasn't
initialized and the PagePoison remained set when reserve_bootmem_region
called __SetPageReserved, inducing a silent boot failure with DEBUG_VM
(and correctly so, because the crash signaled the nodeid/nid of pfn 0
would be again wrong).
There's no enforcement that all memblock.reserved ranges must overlap
memblock.memory ranges, so the memblock.reserved ranges also require an
explicit initialization and the zones ranges need to be extended to
include all memblock.reserved ranges with struct pages too or they'll be
left uninitialized with PagePoison as it happened to pfn 0.
Link: https://lkml.kernel.org/r/20201205013238.21663-2-aarcange@redhat.com
Fixes: 73a6e474cb37 ("mm: memmap_init: iterate over memblock regions rather that check each PFN")
Signed-off-by: Andrea Arcangeli <aarcange(a)redhat.com>
Cc: Mike Rapoport <rppt(a)linux.ibm.com>
Cc: Baoquan He <bhe(a)redhat.com>
Cc: David Hildenbrand <david(a)redhat.com>
Cc: Mel Gorman <mgorman(a)suse.de>
Cc: Michal Hocko <mhocko(a)kernel.org>
Cc: Qian Cai <cai(a)lca.pw>
Cc: Vlastimil Babka <vbabka(a)suse.cz>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
include/linux/memblock.h | 17 ++++++++---
mm/debug.c | 3 +
mm/memblock.c | 4 +-
mm/page_alloc.c | 57 +++++++++++++++++++++++++++++--------
4 files changed, 63 insertions(+), 18 deletions(-)
--- a/include/linux/memblock.h~mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges
+++ a/include/linux/memblock.h
@@ -251,7 +251,8 @@ static inline bool memblock_is_nomap(str
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
- unsigned long *out_end_pfn, int *out_nid);
+ unsigned long *out_end_pfn, int *out_nid,
+ struct memblock_type *type);
/**
* for_each_mem_pfn_range - early memory pfn range iterator
@@ -263,9 +264,17 @@ void __next_mem_pfn_range(int *idx, int
*
* Walks over configured memory ranges.
*/
-#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
- for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
- i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
+ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.memory); \
+ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.memory))
+
+#define for_each_res_pfn_range(i, nid, p_start, p_end, p_nid) \
+ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.reserved); \
+ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.reserved))
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
--- a/mm/debug.c~mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges
+++ a/mm/debug.c
@@ -64,7 +64,8 @@ void __dump_page(struct page *page, cons
* dump_page() when detected.
*/
if (page_poisoned) {
- pr_warn("page:%px is uninitialized and poisoned", page);
+ pr_warn("page:%px pfn:%ld is uninitialized and poisoned",
+ page, page_to_pfn(page));
goto hex_only;
}
--- a/mm/memblock.c~mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges
+++ a/mm/memblock.c
@@ -1198,9 +1198,9 @@ void __init_memblock __next_mem_range_re
*/
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
unsigned long *out_start_pfn,
- unsigned long *out_end_pfn, int *out_nid)
+ unsigned long *out_end_pfn, int *out_nid,
+ struct memblock_type *type)
{
- struct memblock_type *type = &memblock.memory;
struct memblock_region *r;
int r_nid;
--- a/mm/page_alloc.c~mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges
+++ a/mm/page_alloc.c
@@ -1458,6 +1458,7 @@ static void __meminit init_reserved_page
{
pg_data_t *pgdat;
int nid, zid;
+ bool found = false;
if (!early_page_uninitialised(pfn))
return;
@@ -1468,10 +1469,15 @@ static void __meminit init_reserved_page
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
struct zone *zone = &pgdat->node_zones[zid];
- if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
+ if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) {
+ found = true;
break;
+ }
}
- __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
+ if (likely(found))
+ __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
+ else
+ WARN_ON_ONCE(1);
}
#else
static inline void init_reserved_page(unsigned long pfn)
@@ -6227,7 +6233,7 @@ void __init __weak memmap_init(unsigned
unsigned long zone,
unsigned long range_start_pfn)
{
- unsigned long start_pfn, end_pfn, next_pfn = 0;
+ unsigned long start_pfn, end_pfn, prev_pfn = 0;
unsigned long range_end_pfn = range_start_pfn + size;
u64 pgcnt = 0;
int i;
@@ -6235,7 +6241,7 @@ void __init __weak memmap_init(unsigned
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
- next_pfn = clamp(next_pfn, range_start_pfn, range_end_pfn);
+ prev_pfn = clamp(prev_pfn, range_start_pfn, range_end_pfn);
if (end_pfn > start_pfn) {
size = end_pfn - start_pfn;
@@ -6243,10 +6249,10 @@ void __init __weak memmap_init(unsigned
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
}
- if (next_pfn < start_pfn)
- pgcnt += init_unavailable_range(next_pfn, start_pfn,
+ if (prev_pfn < start_pfn)
+ pgcnt += init_unavailable_range(prev_pfn, start_pfn,
zone, nid);
- next_pfn = end_pfn;
+ prev_pfn = end_pfn;
}
/*
@@ -6256,12 +6262,31 @@ void __init __weak memmap_init(unsigned
* considered initialized. Make sure that memmap has a well defined
* state.
*/
- if (next_pfn < range_end_pfn)
- pgcnt += init_unavailable_range(next_pfn, range_end_pfn,
+ if (prev_pfn < range_end_pfn)
+ pgcnt += init_unavailable_range(prev_pfn, range_end_pfn,
zone, nid);
+ /*
+ * memblock.reserved isn't enforced to overlap with
+ * memblock.memory so initialize the struct pages for
+ * memblock.reserved too in case it wasn't overlapping.
+ *
+ * If any struct page associated with a memblock.reserved
+ * range isn't overlapping with a zone range, it'll be left
+ * uninitialized, ideally with PagePoison, and it'll be a more
+ * easily detectable error.
+ */
+ for_each_res_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+ end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
+
+ if (end_pfn > start_pfn)
+ pgcnt += init_unavailable_range(start_pfn, end_pfn,
+ zone, nid);
+ }
+
if (pgcnt)
- pr_info("%s: Zeroed struct page in unavailable ranges: %lld\n",
+ pr_info("%s: pages in unavailable ranges: %lld\n",
zone_names[zone], pgcnt);
}
@@ -6499,6 +6524,10 @@ void __init get_pfn_range_for_nid(unsign
*start_pfn = min(*start_pfn, this_start_pfn);
*end_pfn = max(*end_pfn, this_end_pfn);
}
+ for_each_res_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+ *start_pfn = min(*start_pfn, this_start_pfn);
+ *end_pfn = max(*end_pfn, this_end_pfn);
+ }
if (*start_pfn == -1UL)
*start_pfn = 0;
@@ -7126,7 +7155,13 @@ unsigned long __init node_map_pfn_alignm
*/
unsigned long __init find_min_pfn_with_active_regions(void)
{
- return PHYS_PFN(memblock_start_of_DRAM());
+ /*
+ * reserved regions must be included so that their page
+ * structure can be part of a zone and obtain a valid zoneid
+ * before __SetPageReserved().
+ */
+ return min(PHYS_PFN(memblock_start_of_DRAM()),
+ PHYS_PFN(memblock.reserved.regions[0].base));
}
/*
_
Patches currently in -mm which might be from aarcange(a)redhat.com are
mm-initialize-struct-pages-in-reserved-regions-outside-of-the-zone-ranges.patch
This is a note to let you know that I've just added the patch titled
USB: UAS: introduce a quirk to set no_write_same
to my usb git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
in the usb-testing branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will be merged to the usb-next branch sometime soon,
after it passes testing, and the merge window is open.
If you have any questions about this process, please let me know.
From 8010622c86ca5bb44bc98492f5968726fc7c7a21 Mon Sep 17 00:00:00 2001
From: Oliver Neukum <oneukum(a)suse.com>
Date: Wed, 9 Dec 2020 16:26:39 +0100
Subject: USB: UAS: introduce a quirk to set no_write_same
UAS does not share the pessimistic assumption that the storage layer makes,
namely that devices cannot deal with WRITE_SAME. A few devices supported by
UAS are reported to not deal well with WRITE_SAME. Those need a quirk.
Add it to the device that needs it.
Reported-by: David C. Partridge <david.partridge(a)perdrix.co.uk>
Signed-off-by: Oliver Neukum <oneukum(a)suse.com>
Cc: stable <stable(a)vger.kernel.org>
Link: https://lore.kernel.org/r/20201209152639.9195-1-oneukum@suse.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
Documentation/admin-guide/kernel-parameters.txt | 1 +
drivers/usb/storage/uas.c | 3 +++
drivers/usb/storage/unusual_uas.h | 7 +++++--
drivers/usb/storage/usb.c | 3 +++
include/linux/usb_usual.h | 2 ++
5 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 44fde25bb221..f6a1513dfb76 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5663,6 +5663,7 @@
device);
j = NO_REPORT_LUNS (don't use report luns
command, uas only);
+ k = NO_SAME (do not use WRITE_SAME, uas only)
l = NOT_LOCKABLE (don't try to lock and
unlock ejectable media, not on uas);
m = MAX_SECTORS_64 (don't transfer more
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 56422c4b4ff3..bef89c6bd1d7 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -868,6 +868,9 @@ static int uas_slave_configure(struct scsi_device *sdev)
if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
sdev->no_read_capacity_16 = 1;
+ /* Some disks cannot handle WRITE_SAME */
+ if (devinfo->flags & US_FL_NO_SAME)
+ sdev->no_write_same = 1;
/*
* Some disks return the total number of blocks in response
* to READ CAPACITY rather than the highest block number.
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 711ab240058c..870e9cf3d5dc 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -35,12 +35,15 @@ UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES),
-/* Reported-by: Julian Groß <julian.g(a)posteo.de> */
+/*
+ * Initially Reported-by: Julian Groß <julian.g(a)posteo.de>
+ * Further reports David C. Partridge <david.partridge(a)perdrix.co.uk>
+ */
UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
"LaCie",
"2Big Quadra USB3",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_REPORT_OPCODES),
+ US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 94a64729dc27..90aa9c12ffac 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -541,6 +541,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'j':
f |= US_FL_NO_REPORT_LUNS;
break;
+ case 'k':
+ f |= US_FL_NO_SAME;
+ break;
case 'l':
f |= US_FL_NOT_LOCKABLE;
break;
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 4a19ac3f24d0..6b03fdd69d27 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -84,6 +84,8 @@
/* Cannot handle REPORT_LUNS */ \
US_FLAG(ALWAYS_SYNC, 0x20000000) \
/* lies about caching, so always sync */ \
+ US_FLAG(NO_SAME, 0x40000000) \
+ /* Cannot handle WRITE_SAME */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
--
2.29.2