Allow zero parity shards (#161)

master
Shawn Zivontsis 2021-03-08 10:13:24 -05:00 committed by GitHub
parent ab26eb4126
commit 0e7f9a6a6f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 117 additions and 6 deletions

View File

@ -117,8 +117,9 @@ type reedSolomon struct {
}
// ErrInvShardNum will be returned by New, if you attempt to create
// an Encoder where either data or parity shards is zero or less.
var ErrInvShardNum = errors.New("cannot create Encoder with zero or less data/parity shards")
// an Encoder with less than one data shard or less than zero parity
// shards.
var ErrInvShardNum = errors.New("cannot create Encoder with less than one data shard or less than zero parity shards")
// ErrMaxShardNum will be returned by New, if you attempt to create an
// Encoder where data and parity shards are bigger than the order of
@ -249,7 +250,7 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
for _, opt := range opts {
opt(&r.o)
}
if dataShards <= 0 || parityShards <= 0 {
if dataShards <= 0 || parityShards < 0 {
return nil, ErrInvShardNum
}
@ -257,6 +258,10 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
return nil, ErrMaxShardNum
}
if parityShards == 0 {
return &r, nil
}
var err error
switch {
case r.o.fastOneParity && parityShards == 1:
@ -423,6 +428,10 @@ func (r *reedSolomon) Update(shards [][]byte, newDatashards [][]byte) error {
}
func (r *reedSolomon) updateParityShards(matrixRows, oldinputs, newinputs, outputs [][]byte, outputCount, byteCount int) {
if len(outputs) == 0 {
return
}
if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
r.updateParityShardsP(matrixRows, oldinputs, newinputs, outputs, outputCount, byteCount)
return
@ -612,6 +621,9 @@ func (r *reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, outp
// except this will check values and return
// as soon as a difference is found.
func (r *reedSolomon) checkSomeShards(matrixRows, inputs, toCheck [][]byte, outputCount, byteCount int) bool {
if len(toCheck) == 0 {
return true
}
if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
return r.checkSomeShardsP(matrixRows, inputs, toCheck, outputCount, byteCount)
}

View File

@ -180,7 +180,9 @@ func TestEncoding(t *testing.T) {
// matrix sizes to test.
// note that par1 matrix will fail on some combinations.
var testSizes = [][2]int{{1, 1}, {1, 2}, {3, 3}, {3, 1}, {5, 3}, {8, 4}, {10, 30}, {12, 10}, {14, 7}, {41, 17}, {49, 1}}
var testSizes = [][2]int{
{1, 0}, {3, 0}, {5, 0}, {8, 0}, {10, 0}, {12, 0}, {14, 0}, {41, 0}, {49, 0},
{1, 1}, {1, 2}, {3, 3}, {3, 1}, {5, 3}, {8, 4}, {10, 30}, {12, 10}, {14, 7}, {41, 17}, {49, 1}}
var testDataSizes = []int{10, 100, 1000, 10001, 100003, 1000055}
var testDataSizesShort = []int{10, 10001, 100003}
@ -220,6 +222,22 @@ func testEncoding(t *testing.T, o ...Option) {
if !ok {
t.Fatal("Verification failed")
}
if parity == 0 {
// Check that Reconstruct and ReconstructData do nothing
err = r.ReconstructData(shards)
if err != nil {
t.Fatal(err)
}
err = r.Reconstruct(shards)
if err != nil {
t.Fatal(err)
}
// Skip integrity checks
return
}
// Delete one in data
idx := rng.Intn(data)
want := shards[idx]
@ -1434,10 +1452,12 @@ func TestNew(t *testing.T) {
{127, 127, nil},
{128, 128, nil},
{255, 1, nil},
{255, 0, nil},
{1, 0, nil},
{256, 256, ErrMaxShardNum},
{0, 1, ErrInvShardNum},
{1, 0, ErrInvShardNum},
{1, -1, ErrInvShardNum},
{256, 1, ErrMaxShardNum},
// overflow causes r.Shards to be negative

View File

@ -117,6 +117,84 @@ func TestStreamEncodingConcurrent(t *testing.T) {
}
}
func TestStreamZeroParity(t *testing.T) {
	// Exercise a stream encoder configured with zero parity shards:
	// Encode, Verify and Reconstruct must all succeed as no-ops.
	shardSize := 10 << 20
	if testing.Short() {
		shardSize = 50000
	}
	enc, err := NewStream(10, 0, testOptions()...)
	if err != nil {
		t.Fatal(err)
	}
	rand.Seed(0)
	src := randomBytes(10, shardSize)
	// Encoding with an empty parity writer slice must succeed.
	if err := enc.Encode(toReaders(toBuffers(src)), []io.Writer{}); err != nil {
		t.Fatal(err)
	}
	// Verification of data-only shards should pass. Fresh buffers are
	// built from src because the previous readers were consumed.
	ok, err := enc.Verify(toReaders(toBuffers(src)))
	if err != nil {
		t.Fatal(err)
	}
	if !ok {
		t.Fatal("Verification failed")
	}
	// With no parity there is nothing to rebuild; Reconstruct must
	// return cleanly without writing anything.
	if err := enc.Reconstruct(toReaders(toBuffers(src)), nilWriters(10)); err != nil {
		t.Fatal(err)
	}
}
func TestStreamZeroParityConcurrent(t *testing.T) {
	// Same zero-parity round trip as TestStreamZeroParity, but through
	// the concurrent stream encoder (NewStreamC with concurrent reads
	// and writes enabled).
	shardSize := 10 << 20
	if testing.Short() {
		shardSize = 50000
	}
	enc, err := NewStreamC(10, 0, true, true, testOptions()...)
	if err != nil {
		t.Fatal(err)
	}
	rand.Seed(0)
	src := randomBytes(10, shardSize)
	// Encoding with an empty parity writer slice must succeed.
	if err := enc.Encode(toReaders(toBuffers(src)), []io.Writer{}); err != nil {
		t.Fatal(err)
	}
	// Verification of data-only shards should pass. Fresh buffers are
	// built from src because the previous readers were consumed.
	ok, err := enc.Verify(toReaders(toBuffers(src)))
	if err != nil {
		t.Fatal(err)
	}
	if !ok {
		t.Fatal("Verification failed")
	}
	// With no parity there is nothing to rebuild; Reconstruct must
	// return cleanly without writing anything.
	if err := enc.Reconstruct(toReaders(toBuffers(src)), nilWriters(10)); err != nil {
		t.Fatal(err)
	}
}
func randomBuffer(length int) *bytes.Buffer {
b := make([]byte, length)
fillRandom(b)
@ -605,10 +683,11 @@ func TestNewStream(t *testing.T) {
err error
}{
{127, 127, nil},
{1, 0, nil},
{256, 256, ErrMaxShardNum},
{0, 1, ErrInvShardNum},
{1, 0, ErrInvShardNum},
{1, -1, ErrInvShardNum},
{257, 1, ErrMaxShardNum},
// overflow causes r.Shards to be negative