Re: [PATCH 2/2] bitmap: Add test for bitmap_cut()

From: Andy Shevchenko
Date: Mon Jun 15 2020 - 05:46:21 EST


On Mon, Jun 15, 2020 at 12:41:55PM +0300, Andy Shevchenko wrote:
> On Sun, Jun 14, 2020 at 07:40:54PM +0200, Stefano Brivio wrote:
> > Inspired by an original patch from Yury Norov: introduce a test for
> > bitmap_cut() that also makes sure functionality is as described for
> > partially overlapping src and dst.
>
> Taking into account the recent fixes for BE 64-bit, do we have test cases for that?

It might be enough to have only these, but perhaps the s390 folks can help?

Alexander, can you apply this patch (without the first one, which is supposed
to provide the fix) and confirm that the test case fails, then apply the first
one and confirm the fix?

> > Signed-off-by: Stefano Brivio <sbrivio@xxxxxxxxxx>
> > ---
> > v2:
> > - use expect_eq_bitmap() instead of open coding result check (Andy
> > Shevchenko)
> > - don't use uncommon Co-authored-by: tag (Andy Shevchenko), drop
> > it altogether as Yury asked me to go ahead with this and I haven't
> > heard back in a while. Patch is now rather different anyway
> > - avoid stack overflow, buffer needs to be five unsigned longs and
> > not four as 'in' is shifted by one, spotted by kernel test robot
> > with CONFIG_STACKPROTECTOR_STRONG
> >
> > lib/test_bitmap.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 58 insertions(+)
> >
> > diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
> > index 6b13150667f5..df903c53952b 100644
> > --- a/lib/test_bitmap.c
> > +++ b/lib/test_bitmap.c
> > @@ -610,6 +610,63 @@ static void __init test_for_each_set_clump8(void)
> > expect_eq_clump8(start, CLUMP_EXP_NUMBITS, clump_exp, &clump);
> > }
> >
> > +struct test_bitmap_cut {
> > + unsigned int first;
> > + unsigned int cut;
> > + unsigned int nbits;
> > + unsigned long in[4];
> > + unsigned long expected[4];
> > +};
> > +
> > +static struct test_bitmap_cut test_cut[] = {
> > + { 0, 0, 8, { 0x0000000aUL, }, { 0x0000000aUL, }, },
> > + { 0, 0, 32, { 0xdadadeadUL, }, { 0xdadadeadUL, }, },
> > + { 0, 3, 8, { 0x000000aaUL, }, { 0x00000015UL, }, },
> > + { 3, 3, 8, { 0x000000aaUL, }, { 0x00000012UL, }, },
> > + { 0, 1, 32, { 0xa5a5a5a5UL, }, { 0x52d2d2d2UL, }, },
> > + { 0, 8, 32, { 0xdeadc0deUL, }, { 0x00deadc0UL, }, },
> > + { 1, 1, 32, { 0x5a5a5a5aUL, }, { 0x2d2d2d2cUL, }, },
> > + { 0, 15, 32, { 0xa5a5a5a5UL, }, { 0x00014b4bUL, }, },
> > + { 0, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
> > + { 15, 15, 32, { 0xa5a5a5a5UL, }, { 0x000125a5UL, }, },
> > + { 15, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
> > + { 16, 15, 32, { 0xa5a5a5a5UL, }, { 0x0001a5a5UL, }, },
> > +
> > + { BITS_PER_LONG, BITS_PER_LONG, BITS_PER_LONG,
> > + { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
> > + { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
> > + },
> > + { 1, BITS_PER_LONG - 1, BITS_PER_LONG,
> > + { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
> > + { 0x00000001UL, 0x00000001UL, },
> > + },
> > +
> > + { 0, BITS_PER_LONG * 2, BITS_PER_LONG * 2 + 1,
> > + { 0xa5a5a5a5UL, 0x00000001UL, 0x00000001UL, 0x00000001UL },
> > + { 0x00000001UL, },
> > + },
> > + { 16, BITS_PER_LONG * 2 + 1, BITS_PER_LONG * 2 + 1 + 16,
> > + { 0x0000ffffUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL },
> > + { 0x2d2dffffUL, },
> > + },
> > +};
> > +
> > +static void __init test_bitmap_cut(void)
> > +{
> > + unsigned long b[5], *in = &b[1], *out = &b[0]; /* Partial overlap */
> > + int i;
> > +
> > + for (i = 0; i < ARRAY_SIZE(test_cut); i++) {
> > + struct test_bitmap_cut *t = &test_cut[i];
> > +
> > + memcpy(in, t->in, sizeof(t->in));
> > +
> > + bitmap_cut(out, in, t->first, t->cut, t->nbits);
> > +
> > + expect_eq_bitmap(t->expected, out, t->nbits);
> > + }
> > +}
> > +
> > static void __init selftest(void)
> > {
> > test_zero_clear();
> > @@ -623,6 +680,7 @@ static void __init selftest(void)
> > test_bitmap_parselist_user();
> > test_mem_optimisations();
> > test_for_each_set_clump8();
> > + test_bitmap_cut();
> > }
> >
> > KSTM_MODULE_LOADERS(test_bitmap);
> > --
> > 2.27.0
> >

--
With Best Regards,
Andy Shevchenko