When a read crosses a LUN boundary, it is handy to know the index of the last page of a LUN, so introduce a helper returning it. This helper will soon be reused. While at it, rename pages_per_lun to ppl in the calling function to keep the lines short.
Cc: stable@vger.kernel.org
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
---
This is a dependency for the next patch, so I Cc'd stable on it as well.
---
 drivers/mtd/nand/raw/nand_base.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index bcfd99a1699f..d6a27e08b112 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1211,19 +1211,25 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
 	return nand_exec_op(chip, &op);
 }
 
+static unsigned int rawnand_last_page_of_lun(unsigned int pages_per_lun, unsigned int lun)
+{
+	/* lun is expected to be very small */
+	return (lun * pages_per_lun) + pages_per_lun - 1;
+}
+
 static void rawnand_cap_cont_reads(struct nand_chip *chip)
 {
 	struct nand_memory_organization *memorg;
-	unsigned int pages_per_lun, first_lun, last_lun;
+	unsigned int ppl, first_lun, last_lun;
 
 	memorg = nanddev_get_memorg(&chip->base);
-	pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
-	first_lun = chip->cont_read.first_page / pages_per_lun;
-	last_lun = chip->cont_read.last_page / pages_per_lun;
+	ppl = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
+	first_lun = chip->cont_read.first_page / ppl;
+	last_lun = chip->cont_read.last_page / ppl;
 
 	/* Prevent sequential cache reads across LUN boundaries */
 	if (first_lun != last_lun)
-		chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
+		chip->cont_read.pause_page = rawnand_last_page_of_lun(ppl, first_lun);
 	else
 		chip->cont_read.pause_page = chip->cont_read.last_page;
 }
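
Not part of the patch, only an illustration for reviewers: a minimal, standalone sketch of the capping logic the new helper feeds into. The NAND geometry and page numbers below are made up for the example, they do not come from the patch.

#include <stdio.h>

static unsigned int rawnand_last_page_of_lun(unsigned int pages_per_lun, unsigned int lun)
{
	/* lun is expected to be very small */
	return (lun * pages_per_lun) + pages_per_lun - 1;
}

int main(void)
{
	/* Hypothetical geometry: 64 pages per eraseblock, 1024 eraseblocks per LUN */
	unsigned int ppl = 64 * 1024;
	/* A continuous read starting near the end of LUN 0 and ending inside LUN 1 */
	unsigned int first_page = 65000, last_page = 66000;
	unsigned int first_lun = first_page / ppl;
	unsigned int last_lun = last_page / ppl;
	unsigned int pause_page;

	/* Same decision as rawnand_cap_cont_reads(): pause at the LUN boundary */
	if (first_lun != last_lun)
		pause_page = rawnand_last_page_of_lun(ppl, first_lun);
	else
		pause_page = last_page;

	printf("pause_page = %u\n", pause_page); /* prints 65535, the last page of LUN 0 */
	return 0;
}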