diff --git a/cmd/thanos/compact.go b/cmd/thanos/compact.go
index de65405fc80..b166adc006a 100644
--- a/cmd/thanos/compact.go
+++ b/cmd/thanos/compact.go
@@ -247,6 +247,8 @@ func runCompact(
 		blockLister = block.NewConcurrentLister(logger, insBkt)
 	case recursiveDiscovery:
 		blockLister = block.NewRecursiveLister(logger, insBkt)
+	case birthstoneDiscovery:
+		blockLister = block.NewBirthstoneLister(logger, insBkt)
 	default:
 		return errors.Errorf("unknown sync strategy %s", conf.blockListStrategy)
 	}
@@ -381,6 +383,7 @@ func runCompact(
 		metadata.HashFunc(conf.hashFunc),
 		conf.blockFilesConcurrency,
 		conf.compactBlocksFetchConcurrency,
+		conf.enableBirthstone,
 	)
 	var planner compact.Planner
@@ -409,6 +412,7 @@ func runCompact(
 		insBkt,
 		conf.compactionConcurrency,
 		conf.skipBlockWithOutOfOrderChunks,
+		conf.enableBirthstone,
 	)
 	if err != nil {
 		return errors.Wrap(err, "create bucket compactor")
 	}
@@ -518,6 +522,7 @@ func runCompact(
 				conf.blockFilesConcurrency,
 				metadata.HashFunc(conf.hashFunc),
 				conf.acceptMalformedIndex,
+				conf.enableBirthstone,
 			); err != nil {
 				return errors.Wrap(err, "first pass of downsampling failed")
 			}
@@ -547,6 +552,7 @@ func runCompact(
 					conf.blockFilesConcurrency,
 					metadata.HashFunc(conf.hashFunc),
 					conf.acceptMalformedIndex,
+					conf.enableBirthstone,
 				); err != nil {
 					return errors.Wrap(err, "second pass of downsampling failed")
 				}
@@ -792,6 +798,7 @@ type compactConfig struct {
 	progressCalculateInterval time.Duration
 	filterConf                *store.FilterConfig
 	disableAdminOperations    bool
+	enableBirthstone          bool
 }

 func (cc *compactConfig) registerFlag(cmd extkingpin.FlagClause) {
@@ -832,7 +839,7 @@ func (cc *compactConfig) registerFlag(cmd extkingpin.FlagClause) {
 		"as querying long time ranges without non-downsampled data is not efficient and useful e.g it is not possible to render all samples for a human eye anyway").
 		Default("false").BoolVar(&cc.disableDownsampling)

-	strategies := strings.Join([]string{string(concurrentDiscovery), string(recursiveDiscovery)}, ", ")
+	strategies := strings.Join([]string{string(concurrentDiscovery), string(recursiveDiscovery), string(birthstoneDiscovery)}, ", ")
 	cmd.Flag("block-discovery-strategy", "One of "+strategies+". When set to concurrent, stores will concurrently issue one call per directory to discover active blocks in the bucket. The recursive strategy iterates through all objects in the bucket, recursively traversing into each directory. This avoids N+1 calls at the expense of having slower bucket iterations.").
 		Default(string(concurrentDiscovery)).StringVar(&cc.blockListStrategy)
 	cmd.Flag("block-meta-fetch-concurrency", "Number of goroutines to use when fetching block metadata from object storage.").
@@ -910,4 +917,7 @@ func (cc *compactConfig) registerFlag(cmd extkingpin.FlagClause) {
 	cmd.Flag("bucket-web-label", "External block label to use as group title in the bucket web UI").StringVar(&cc.label)

 	cmd.Flag("disable-admin-operations", "Disable UI/API admin operations like marking blocks for deletion and no compaction.").Default("false").BoolVar(&cc.disableAdminOperations)
+
+	cmd.Flag("enable-birthstone", "When set to true, upload a birthstone file when a block is created and delete it when the block is deleted. The birthstone file marks the completeness of a block in the bucket.").
+		Hidden().Default("false").BoolVar(&cc.enableBirthstone)
 }
diff --git a/cmd/thanos/config.go b/cmd/thanos/config.go
index f72d19fd79f..c02e0deca53 100644
--- a/cmd/thanos/config.go
+++ b/cmd/thanos/config.go
@@ -167,6 +167,7 @@ type shipperConfig struct {
 	allowOutOfOrderUpload bool
 	hashFunc              string
 	metaFileName          string
+	enableBirthstone      bool
 }

 func (sc *shipperConfig) registerFlag(cmd extkingpin.FlagClause) *shipperConfig {
@@ -184,6 +185,9 @@ func (sc *shipperConfig) registerFlag(cmd extkingpin.FlagClause) *shipperConfig
 	cmd.Flag("hash-func", "Specify which hash function to use when calculating the hashes of produced files. If no function has been specified, it does not happen. This permits avoiding downloading some files twice albeit at some performance cost. Possible values are: \"\", \"SHA256\".").
 		Default("").EnumVar(&sc.hashFunc, "SHA256", "")
 	cmd.Flag("shipper.meta-file-name", "the file to store shipper metadata in").Default(shipper.DefaultMetaFilename).StringVar(&sc.metaFileName)
+	cmd.Flag("shipper.enable-birthstone",
+		"If true, the shipper will upload a birthstone once a block is completely uploaded to the bucket.").
+		Default("false").BoolVar(&sc.enableBirthstone)
 	return sc
 }
diff --git a/cmd/thanos/downsample.go b/cmd/thanos/downsample.go
index 556369b0a1f..85d0130d981 100644
--- a/cmd/thanos/downsample.go
+++ b/cmd/thanos/downsample.go
@@ -78,6 +78,7 @@ func RunDownsample(
 	objStoreConfig *extflag.PathOrContent,
 	comp component.Component,
 	hashFunc metadata.HashFunc,
+	enableBirthstone bool,
 ) error {
 	confContentYaml, err := objStoreConfig.Content()
 	if err != nil {
@@ -134,7 +135,7 @@ func RunDownsample(
 					metrics.downsamples.WithLabelValues(resolutionLabel)
 					metrics.downsampleFailures.WithLabelValues(resolutionLabel)
 				}
-				if err := downsampleBucket(ctx, logger, metrics, insBkt, metas, dataDir, downsampleConcurrency, blockFilesConcurrency, hashFunc, false); err != nil {
+				if err := downsampleBucket(ctx, logger, metrics, insBkt, metas, dataDir, downsampleConcurrency, blockFilesConcurrency, hashFunc, false, enableBirthstone); err != nil {
 					return errors.Wrap(err, "downsampling failed")
 				}
@@ -143,7 +144,7 @@ func RunDownsample(
 				if err != nil {
 					return errors.Wrap(err, "sync before second pass of downsampling")
 				}
-				if err := downsampleBucket(ctx, logger, metrics, insBkt, metas, dataDir, downsampleConcurrency, blockFilesConcurrency, hashFunc, false); err != nil {
+				if err := downsampleBucket(ctx, logger, metrics, insBkt, metas, dataDir, downsampleConcurrency, blockFilesConcurrency, hashFunc, false, enableBirthstone); err != nil {
 					return errors.Wrap(err, "downsampling failed")
 				}
 				return nil
@@ -185,6 +186,7 @@ func downsampleBucket(
 	blockFilesConcurrency int,
 	hashFunc metadata.HashFunc,
 	acceptMalformedIndex bool,
+	enableBirthstone bool,
 ) (rerr error) {
 	if err := os.MkdirAll(dir, 0750); err != nil {
 		return errors.Wrap(err, "create dir")
 	}
@@ -262,7 +264,7 @@ func downsampleBucket(
 					resolution = downsample.ResLevel2
 					errMsg = "downsampling to 60 min"
 				}
-				if err := processDownsampling(workerCtx, logger, bkt, m, dir, resolution, hashFunc, metrics, acceptMalformedIndex, blockFilesConcurrency); err != nil {
+				if err := processDownsampling(workerCtx, logger, bkt, m, dir, resolution, hashFunc, metrics, acceptMalformedIndex, blockFilesConcurrency, enableBirthstone); err != nil {
 					metrics.downsampleFailures.WithLabelValues(m.Thanos.ResolutionString()).Inc()
 					errCh <- errors.Wrap(err, errMsg)
@@ -353,6 +355,7 @@ func processDownsampling(
 	metrics *DownsampleMetrics,
 	acceptMalformedIndex bool,
 	blockFilesConcurrency
int, + enableBirthstone bool, ) error { begin := time.Now() bdir := filepath.Join(dir, m.ULID.String()) @@ -418,7 +421,7 @@ func processDownsampling( begin = time.Now() - err = block.Upload(ctx, logger, bkt, resdir, hashFunc) + err = block.Upload(ctx, logger, bkt, resdir, hashFunc, enableBirthstone) if err != nil { return compact.NewRetryError(errors.Wrapf(err, "upload downsampled block %s", id)) } diff --git a/cmd/thanos/main_test.go b/cmd/thanos/main_test.go index 6cbd84cb152..3ea0970ac8a 100644 --- a/cmd/thanos/main_test.go +++ b/cmd/thanos/main_test.go @@ -138,7 +138,7 @@ func TestRegression4960_Deadlock(t *testing.T) { labels.Labels{{Name: "e1", Value: "1"}}, downsample.ResLevel0, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id.String()), metadata.NoneFunc, false)) } { id2, err = e2eutil.CreateBlock( @@ -149,7 +149,7 @@ func TestRegression4960_Deadlock(t *testing.T) { labels.Labels{{Name: "e1", Value: "2"}}, downsample.ResLevel0, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id2.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id2.String()), metadata.NoneFunc, false)) } { id3, err = e2eutil.CreateBlock( @@ -160,7 +160,7 @@ func TestRegression4960_Deadlock(t *testing.T) { labels.Labels{{Name: "e1", Value: "2"}}, downsample.ResLevel0, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id3.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id3.String()), metadata.NoneFunc, false)) } meta, err := block.DownloadMeta(ctx, logger, bkt, id) @@ -174,7 +174,7 @@ func TestRegression4960_Deadlock(t *testing.T) { metas, _, err := metaFetcher.Fetch(ctx) testutil.Ok(t, err) - err = downsampleBucket(ctx, logger, metrics, bkt, metas, dir, 1, 1, metadata.NoneFunc, false) + err = downsampleBucket(ctx, logger, metrics, bkt, metas, dir, 1, 1, metadata.NoneFunc, false, false) testutil.NotOk(t, err) testutil.Assert(t, strings.Contains(err.Error(), "some random error has occurred")) @@ -200,7 +200,7 @@ func TestCleanupDownsampleCacheFolder(t *testing.T) { labels.Labels{{Name: "e1", Value: "1"}}, downsample.ResLevel0, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id.String()), metadata.NoneFunc, false)) } meta, err := block.DownloadMeta(ctx, logger, bkt, id) @@ -214,7 +214,7 @@ func TestCleanupDownsampleCacheFolder(t *testing.T) { metas, _, err := metaFetcher.Fetch(ctx) testutil.Ok(t, err) - testutil.Ok(t, downsampleBucket(ctx, logger, metrics, bkt, metas, dir, 1, 1, metadata.NoneFunc, false)) + testutil.Ok(t, downsampleBucket(ctx, logger, metrics, bkt, metas, dir, 1, 1, metadata.NoneFunc, false, false)) testutil.Equals(t, 1.0, promtest.ToFloat64(metrics.downsamples.WithLabelValues(meta.Thanos.ResolutionString()))) _, err = os.Stat(dir) diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index a9173b34882..6ebbe4cdfdb 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -150,6 +150,9 @@ func runReceive( level.Info(logger).Log("msg", "metric name filter feature enabled") } } + if conf.enableBirthstone { + multiTSDBOptions = append(multiTSDBOptions, receive.WithBirthstoneEnabled()) + } // Create a 
matcher converter if specified by command line to cache expensive regex matcher conversions.
 	// Proxy store and TSDB stores of all tenants share a single cache.
@@ -985,6 +988,7 @@ type receiveConfig struct {
 	ignoreBlockSize       bool
 	allowOutOfOrderUpload bool
+	enableBirthstone      bool

 	reqLogConfig      *extflag.PathOrContent
 	relabelConfigPath *extflag.PathOrContent
@@ -1153,6 +1157,10 @@ func (rc *receiveConfig) registerFlag(cmd extkingpin.FlagClause) {
 		"about order.").
 		Default("false").Hidden().BoolVar(&rc.allowOutOfOrderUpload)

+	cmd.Flag("shipper.enable-birthstone",
+		"If true, the shipper will upload a birthstone for each completely uploaded block to the bucket.").
+		Default("false").Hidden().BoolVar(&rc.enableBirthstone)
+
 	rc.reqLogConfig = extkingpin.RegisterRequestLoggingFlags(cmd)

 	rc.writeLimitsConfig = extflag.RegisterPathOrContent(cmd, "receive.limits-config", "YAML file that contains limit configuration.", extflag.WithEnvSubstitution(), extflag.WithHidden())
diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go
index 41d996e0200..97209355b44 100644
--- a/cmd/thanos/rule.go
+++ b/cmd/thanos/rule.go
@@ -856,7 +856,7 @@ func runRule(
 		}
 	}()

-	s := shipper.New(logger, reg, conf.dataDir, bkt, func() labels.Labels { return conf.lset }, metadata.RulerSource, nil, conf.shipper.allowOutOfOrderUpload, metadata.HashFunc(conf.shipper.hashFunc), conf.shipper.metaFileName)
+	s := shipper.New(logger, reg, conf.dataDir, bkt, func() labels.Labels { return conf.lset }, metadata.RulerSource, nil, conf.shipper.allowOutOfOrderUpload, metadata.HashFunc(conf.shipper.hashFunc), conf.shipper.metaFileName, conf.shipper.enableBirthstone)

 	ctx, cancel := context.WithCancel(context.Background())
diff --git a/cmd/thanos/sidecar.go b/cmd/thanos/sidecar.go
index 127584ea947..f3dc1b8a4c0 100644
--- a/cmd/thanos/sidecar.go
+++ b/cmd/thanos/sidecar.go
@@ -416,7 +416,7 @@ func runSidecar(
 			uploadCompactedFunc := func() bool { return conf.shipper.uploadCompacted }
 			s := shipper.New(logger, reg, conf.tsdb.path, bkt, m.Labels, metadata.SidecarSource,
-				uploadCompactedFunc, conf.shipper.allowOutOfOrderUpload, metadata.HashFunc(conf.shipper.hashFunc), conf.shipper.metaFileName)
+				uploadCompactedFunc, conf.shipper.allowOutOfOrderUpload, metadata.HashFunc(conf.shipper.hashFunc), conf.shipper.metaFileName, conf.shipper.enableBirthstone)

 			return runutil.Repeat(30*time.Second, ctx.Done(), func() error {
 				if uploaded, err := s.Sync(ctx); err != nil {
diff --git a/cmd/thanos/store.go b/cmd/thanos/store.go
index 795c108cd3f..369e09c679b 100644
--- a/cmd/thanos/store.go
+++ b/cmd/thanos/store.go
@@ -64,6 +64,7 @@ type syncStrategy string
 const (
 	concurrentDiscovery syncStrategy = "concurrent"
 	recursiveDiscovery  syncStrategy = "recursive"
+	birthstoneDiscovery syncStrategy = "birthstone"
 )

 type storeConfig struct {
diff --git a/cmd/thanos/tools_bucket.go b/cmd/thanos/tools_bucket.go
index e0391af15b5..c92eb96e7ec 100644
--- a/cmd/thanos/tools_bucket.go
+++ b/cmd/thanos/tools_bucket.go
@@ -12,10 +12,12 @@ import (
 	"io"
 	"net/http"
 	"os"
+	"path"
 	"path/filepath"
 	"sort"
 	"strconv"
 	"strings"
+	"sync"
 	"text/template"
 	"time"

@@ -35,6 +37,7 @@ import (
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"golang.org/x/sync/errgroup"
 	"golang.org/x/text/language"
 	"golang.org/x/text/message"
 	"gopkg.in/yaml.v3"
@@ -89,11 +92,12 @@ const (
 )

 type bucketRewriteConfig struct {
-	blockIDs     []string
-	tmpDir       string
-	dryRun       bool
-	promBlocks   bool
-	deleteBlocks bool
+	blockIDs         []string
+	tmpDir
string + dryRun bool + promBlocks bool + deleteBlocks bool + enableBirthstone bool } type bucketInspectConfig struct { @@ -103,9 +107,10 @@ type bucketInspectConfig struct { } type bucketVerifyConfig struct { - repair bool - ids []string - issuesToVerify []string + repair bool + ids []string + issuesToVerify []string + enableBirthstone bool } type bucketLsConfig struct { @@ -139,6 +144,7 @@ type bucketDownsampleConfig struct { blockFilesConcurrency int dataDir string hashFunc string + enableBirthstone bool } type bucketCleanupConfig struct { @@ -176,6 +182,8 @@ func (tbc *bucketVerifyConfig) registerBucketVerifyFlag(cmd extkingpin.FlagClaus cmd.Flag("id", "Block IDs to verify (and optionally repair) only. "+ "If none is specified, all blocks will be verified. Repeated field").StringsVar(&tbc.ids) + + cmd.Flag("enable-birthstone", "Upload birthstone to mark block completion.").Default("false").BoolVar(&tbc.enableBirthstone) return tbc } @@ -238,6 +246,7 @@ func (tbc *bucketRewriteConfig) registerBucketRewriteFlag(cmd extkingpin.FlagCla cmd.Flag("dry-run", "Prints the series changes instead of doing them. Defaults to true, for user to double check. (: Pass --no-dry-run to skip this.").Default("true").BoolVar(&tbc.dryRun) cmd.Flag("prom-blocks", "If specified, we assume the blocks to be uploaded are only used with Prometheus so we don't check external labels in this case.").Default("false").BoolVar(&tbc.promBlocks) cmd.Flag("delete-blocks", "Whether to delete the original blocks after rewriting blocks successfully. Available in non dry-run mode only.").Default("false").BoolVar(&tbc.deleteBlocks) + cmd.Flag("enable-birthstone", "Upload birthstone to mark block completion.").Default("false").BoolVar(&tbc.enableBirthstone) return tbc } @@ -253,6 +262,7 @@ func (tbc *bucketDownsampleConfig) registerBucketDownsampleFlag(cmd extkingpin.F Default("./data").StringVar(&tbc.dataDir) cmd.Flag("hash-func", "Specify which hash function to use when calculating the hashes of produced files. If no function has been specified, it does not happen. This permits avoiding downloading some files twice albeit at some performance cost. Possible values are: \"\", \"SHA256\"."). Default("").EnumVar(&tbc.hashFunc, "SHA256", "") + cmd.Flag("enable-birthstone", "Upload birthstone to mark block completion.").Default("false").BoolVar(&tbc.enableBirthstone) return tbc } @@ -306,6 +316,7 @@ func registerBucket(app extkingpin.AppClause) { registerBucketRewrite(cmd, objStoreConfig) registerBucketRetention(cmd, objStoreConfig) registerBucketUploadBlocks(cmd, objStoreConfig) + registerBucketUploadBirthstone(cmd, objStoreConfig) } func registerBucketVerify(app extkingpin.AppClause, objStoreConfig *extflag.PathOrContent) { @@ -390,7 +401,7 @@ func registerBucketVerify(app extkingpin.AppClause, objStoreConfig *extflag.Path } } - v := verifier.NewManager(reg, logger, insBkt, backupBkt, fetcher, time.Duration(*deleteDelay), r) + v := verifier.NewManager(reg, logger, insBkt, backupBkt, fetcher, time.Duration(*deleteDelay), tbc.enableBirthstone, r) if tbc.repair { return v.VerifyAndRepair(context.Background(), idMatcher) } @@ -799,7 +810,7 @@ func registerBucketDownsample(app extkingpin.AppClause, objStoreConfig *extflag. 
 	cmd.Setup(func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, _ <-chan struct{}, _ bool) error {
 		return RunDownsample(g, logger, reg, *httpAddr, *httpTLSConfig, time.Duration(*httpGracePeriod), tbc.dataDir,
-			tbc.waitInterval, tbc.downsampleConcurrency, tbc.blockFilesConcurrency, objStoreConfig, component.Downsample, metadata.HashFunc(tbc.hashFunc))
+			tbc.waitInterval, tbc.downsampleConcurrency, tbc.blockFilesConcurrency, objStoreConfig, component.Downsample, metadata.HashFunc(tbc.hashFunc), tbc.enableBirthstone)
 	})
 }
@@ -1297,11 +1308,11 @@ func registerBucketRewrite(app extkingpin.AppClause, objStoreConfig *extflag.Pat
 			level.Info(logger).Log("msg", "uploading new block", "source", id, "new", newID)
 			if tbc.promBlocks {
-				if err := block.UploadPromBlock(ctx, logger, insBkt, filepath.Join(tbc.tmpDir, newID.String()), metadata.HashFunc(*hashFunc)); err != nil {
+				if err := block.UploadPromBlock(ctx, logger, insBkt, filepath.Join(tbc.tmpDir, newID.String()), metadata.HashFunc(*hashFunc), tbc.enableBirthstone); err != nil {
 					return errors.Wrap(err, "upload")
 				}
 			} else {
-				if err := block.Upload(ctx, logger, insBkt, filepath.Join(tbc.tmpDir, newID.String()), metadata.HashFunc(*hashFunc)); err != nil {
+				if err := block.Upload(ctx, logger, insBkt, filepath.Join(tbc.tmpDir, newID.String()), metadata.HashFunc(*hashFunc), tbc.enableBirthstone); err != nil {
 					return errors.Wrap(err, "upload")
 				}
 			}
@@ -1471,7 +1482,7 @@ func registerBucketUploadBlocks(app extkingpin.AppClause, objStoreConfig *extfla
 		bkt = objstoretracing.WrapWithTraces(objstore.WrapWithMetrics(bkt, extprom.WrapRegistererWithPrefix("thanos_", reg), bkt.Name()))

 		s := shipper.New(logger, reg, tbc.path, bkt, func() labels.Labels { return lset }, metadata.BucketUploadSource,
-			nil, false, metadata.HashFunc(""), shipper.DefaultMetaFilename)
+			nil, false, metadata.HashFunc(""), shipper.DefaultMetaFilename, false)

 		ctx, cancel := context.WithCancel(context.Background())
 		g.Add(func() error {
@@ -1488,3 +1499,104 @@ func registerBucketUploadBlocks(app extkingpin.AppClause, objStoreConfig *extfla
 		return nil
 	})
 }
+
+func registerBucketUploadBirthstone(app extkingpin.AppClause, objStoreConfig *extflag.PathOrContent) {
+	cmd := app.Command("upload-birthstones", "Create birthstones for existing blocks in the bucket. Pause compaction first to avoid race conditions. This command is idempotent.")
+	cmd.Setup(func(g *run.Group, logger log.Logger, reg *prometheus.Registry, _ opentracing.Tracer, _ <-chan struct{}, _ bool) error {
+		confContentYaml, err := objStoreConfig.Content()
+		if err != nil {
+			return errors.Wrap(err, "unable to parse objstore config")
+		}
+		bkt, err := client.NewBucket(logger, confContentYaml, component.Upload.String(), nil)
+		if err != nil {
+			return errors.Wrap(err, "unable to create bucket")
+		}
+		defer runutil.CloseWithLogOnErr(logger, bkt, "bucket client")
+		bkt = objstoretracing.WrapWithTraces(objstore.WrapWithMetrics(bkt, extprom.WrapRegistererWithPrefix("thanos_", reg), bkt.Name()))
+		ctx, cancel := context.WithCancel(context.Background())
+		g.Add(func() error {
+			err := UploadBirthstone(ctx, logger, bkt)
+			if err != nil {
+				return errors.Wrap(err, "unable to upload birthstones")
+			}
+			return nil
+		}, func(error) {
+			cancel()
+		})
+		return nil
+	})
+}
+
+func UploadBirthstone(ctx context.Context, logger log.Logger, bkt objstore.Bucket) error {
+	totalBlocks := 0
+	partialBlocks := 0
+	if logger != nil {
+		level.Info(logger).Log("msg", "concurrent birthstone upload started")
+		start := time.Now()
+		defer func() {
+			level.Info(logger).Log("msg", "concurrent birthstone upload end", "duration", time.Since(start), "uploaded", totalBlocks-partialBlocks, "partial", partialBlocks)
+		}()
+	}
+
+	const concurrency = 64
+	var (
+		ch       = make(chan ulid.ULID, concurrency)
+		eg, gCtx = errgroup.WithContext(ctx)
+		mu       sync.Mutex
+	)
+	for i := 0; i < concurrency; i++ {
+		eg.Go(func() error {
+			for uid := range ch {
+				select {
+				case <-gCtx.Done():
+					return gCtx.Err()
+				default:
+				}
+				metaFile := path.Join(uid.String(), block.MetaFilename)
+				ok, err := bkt.Exists(gCtx, metaFile)
+				if err != nil {
+					if logger != nil {
+						level.Error(logger).Log(
+							"msg", "birthstone upload worker failed to check meta.json file existence",
+							"meta_file", metaFile,
+							"err", err,
+						)
+					}
+					return errors.Wrapf(err, "check existence of meta.json for block %v", uid)
+				}
+				if !ok {
+					// No meta.json means a partial block; skip it rather than mark it complete.
+					mu.Lock()
+					partialBlocks++
+					mu.Unlock()
+					continue
+				}
+				if err := bkt.Upload(gCtx, path.Join(block.BirthstoneDirname, uid.String()), strings.NewReader("")); err != nil {
+					return errors.Wrap(err, "upload birthstone file")
+				}
+			}
+			return nil
+		})
+	}
+
+	iterErr := bkt.Iter(ctx, "", func(name string) error {
+		id, ok := block.IsBlockDir(name)
+		if !ok {
+			return nil
+		}
+		totalBlocks++
+		select {
+		case <-gCtx.Done():
+			return gCtx.Err()
+		case ch <- id:
+		}
+		return nil
+	})
+	// Close the channel even if Iter failed, so the workers do not leak.
+	close(ch)
+
+	if err := eg.Wait(); err != nil {
+		return err
+	}
+	return iterErr
+}
diff --git a/pkg/api/blocks/v1_test.go b/pkg/api/blocks/v1_test.go
index 49e7ef0681e..6ebdb1e9097 100644
--- a/pkg/api/blocks/v1_test.go
+++ b/pkg/api/blocks/v1_test.go
@@ -110,7 +110,8 @@ func TestMarkBlockEndpoint(t *testing.T) {
 	// upload block
 	bkt := objstore.WithNoopInstr(objstore.NewInMemBucket())
 	logger := log.NewNopLogger()
-	testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(tmpDir, b1.String()), metadata.NoneFunc))
+	// This test is invariant to the enableBirthstone flag, so set it to false here.
+	testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(tmpDir, b1.String()), metadata.NoneFunc, false))

 	now := time.Now()
 	api := &BlocksAPI{
diff --git a/pkg/block/block.go b/pkg/block/block.go
index 00fda388316..ef404d8d7fa 100644
--- a/pkg/block/block.go
+++ b/pkg/block/block.go
@@ -40,6 +40,8 @@ const (
 	// DebugMetas is a directory for debug meta files that happen in the past. Useful for debugging.
DebugMetas = "debug/metas" + // BirthstoneDirname is the directory for birthstone files. + BirthstoneDirname = "birthstones" ) // Download downloads directory that is mean to be block directory. If any of the files @@ -93,22 +95,23 @@ func Download(ctx context.Context, logger log.Logger, bucket objstore.Bucket, id } // Upload uploads a TSDB block to the object storage. It verifies basic -// features of Thanos block. -func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string, hf metadata.HashFunc, options ...objstore.UploadOption) error { - return upload(ctx, logger, bkt, bdir, hf, true, options...) +// features of Thanos block. If birthstone is enabled, a birthstone file is used to mark the completion of the block +// upload and will be uploaded last. +func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string, hf metadata.HashFunc, enableBirthstone bool, options ...objstore.UploadOption) error { + return upload(ctx, logger, bkt, bdir, hf, true, enableBirthstone, options...) } // UploadPromBlock uploads a TSDB block to the object storage. It assumes // the block is used in Prometheus so it doesn't check Thanos external labels. -func UploadPromBlock(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string, hf metadata.HashFunc, options ...objstore.UploadOption) error { - return upload(ctx, logger, bkt, bdir, hf, false, options...) +func UploadPromBlock(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string, hf metadata.HashFunc, enableBirthstone bool, options ...objstore.UploadOption) error { + return upload(ctx, logger, bkt, bdir, hf, false, enableBirthstone, options...) } // upload uploads block from given block dir that ends with block id. // It makes sure cleanup is done on error to avoid partial block uploads. // TODO(bplotka): Ensure bucket operations have reasonable backoff retries. // NOTE: Upload updates `meta.Thanos.File` section. -func upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string, hf metadata.HashFunc, checkExternalLabels bool, options ...objstore.UploadOption) error { +func upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string, hf metadata.HashFunc, checkExternalLabels bool, uploadBirthstone bool, options ...objstore.UploadOption) error { df, err := os.Stat(bdir) if err != nil { return err @@ -161,7 +164,12 @@ func upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st // If meta.json is not uploaded, this will produce partial blocks, but such blocks will be cleaned later. return errors.Wrap(err, "upload meta file") } - + if !uploadBirthstone { + return nil + } + if err := bkt.Upload(ctx, path.Join(BirthstoneDirname, id.String()), strings.NewReader("")); err != nil { + return errors.Wrap(err, "upload birthstone file") + } return nil } @@ -211,11 +219,23 @@ func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket // only if they don't have meta.json. If meta.json is present Thanos assumes valid block. // - This avoids deleting empty dir (whole bucket) by mistake. func Delete(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) error { - metaFile := path.Join(id.String(), MetaFilename) - deletionMarkFile := path.Join(id.String(), metadata.DeletionMarkFilename) + // Delete the birthstone first if it exists. 
+ birthstoneFile := path.Join(BirthstoneDirname, id.String()) + ok, err := bkt.Exists(ctx, birthstoneFile) + if err != nil { + return errors.Wrapf(err, "stat %s", birthstoneFile) + } + + if ok { + if err := bkt.Delete(ctx, birthstoneFile); err != nil { + return errors.Wrapf(err, "delete %s", birthstoneFile) + } + level.Debug(logger).Log("msg", "deleted file", "file", birthstoneFile, "bucket", bkt.Name()) + } // Delete block meta file. - ok, err := bkt.Exists(ctx, metaFile) + metaFile := path.Join(id.String(), MetaFilename) + ok, err = bkt.Exists(ctx, metaFile) if err != nil { return errors.Wrapf(err, "stat %s", metaFile) } @@ -230,6 +250,7 @@ func Delete(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid // Delete the block objects, but skip: // - The metaFile as we just deleted. This is required for eventual object storages (list after write). // - The deletionMarkFile as we'll delete it at last. + deletionMarkFile := path.Join(id.String(), metadata.DeletionMarkFilename) err = deleteDirRec(ctx, logger, bkt, id.String(), func(name string) bool { return name == metaFile || name == deletionMarkFile }) diff --git a/pkg/block/block_test.go b/pkg/block/block_test.go index 1ebe6b1c6a8..74293618deb 100644 --- a/pkg/block/block_test.go +++ b/pkg/block/block_test.go @@ -79,75 +79,85 @@ func TestIsBlockDir(t *testing.T) { } func TestUpload(t *testing.T) { - defer custom.TolerantVerifyLeak(t) + runTest := func(t *testing.T, enableBirthstone bool) { + var filesPerBlock int + if enableBirthstone { + filesPerBlock = 4 + } else { + filesPerBlock = 3 + } + defer custom.TolerantVerifyLeak(t) - ctx := context.Background() + ctx := context.Background() - tmpDir := t.TempDir() + tmpDir := t.TempDir() - bkt := objstore.NewInMemBucket() - b1, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ - labels.New(labels.Label{Name: "a", Value: "1"}), - labels.New(labels.Label{Name: "a", Value: "2"}), - labels.New(labels.Label{Name: "a", Value: "3"}), - labels.New(labels.Label{Name: "a", Value: "4"}), - labels.New(labels.Label{Name: "b", Value: "1"}), - }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) - testutil.Ok(t, err) - testutil.Ok(t, os.MkdirAll(path.Join(tmpDir, "test", b1.String()), os.ModePerm)) - - { - // Wrong dir. - err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "not-existing"), metadata.NoneFunc) - testutil.NotOk(t, err) - testutil.Assert(t, strings.HasSuffix(err.Error(), "/not-existing: no such file or directory"), "") - } - { - // Wrong existing dir (not a block). - err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test"), metadata.NoneFunc) - testutil.NotOk(t, err) - testutil.Equals(t, "not a block dir: ulid: bad data size when unmarshaling", err.Error()) - } - { - // Empty block dir. - err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc) - testutil.NotOk(t, err) - testutil.Assert(t, strings.HasSuffix(err.Error(), "/meta.json: no such file or directory"), "") - } - e2eutil.Copy(t, path.Join(tmpDir, b1.String(), MetaFilename), path.Join(tmpDir, "test", b1.String(), MetaFilename)) - { - // Missing chunks. 
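The ordering in the block.go changes above is the whole point of the feature: `upload` writes chunks, index, and meta.json first and the birthstone last, while `Delete` removes the birthstone before anything else. A sketch of the invariant this buys a reader, using only calls that appear in this diff (the helper name `isBlockComplete` is illustrative):

```go
// isBlockComplete treats birthstone presence as proof of block completeness:
// the birthstone is written last on upload and removed first on delete, so
// one cheap existence check against the flat birthstones/ directory suffices.
func isBlockComplete(ctx context.Context, bkt objstore.Bucket, id ulid.ULID) (bool, error) {
	return bkt.Exists(ctx, path.Join(BirthstoneDirname, id.String()))
}
```

Short of out-of-band deletions against the bucket, there is no window in which the birthstone exists while meta.json or any other file of the block is missing.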
- err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc) - testutil.NotOk(t, err) - testutil.Assert(t, strings.HasSuffix(err.Error(), "/chunks: no such file or directory"), err.Error()) - } - testutil.Ok(t, os.MkdirAll(path.Join(tmpDir, "test", b1.String(), ChunksDirname), os.ModePerm)) - e2eutil.Copy(t, path.Join(tmpDir, b1.String(), ChunksDirname, "000001"), path.Join(tmpDir, "test", b1.String(), ChunksDirname, "000001")) - { - // Missing index file. - err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc) - testutil.NotOk(t, err) - testutil.Assert(t, strings.HasSuffix(err.Error(), "/index: no such file or directory"), "") - } - e2eutil.Copy(t, path.Join(tmpDir, b1.String(), IndexFilename), path.Join(tmpDir, "test", b1.String(), IndexFilename)) - testutil.Ok(t, os.Remove(path.Join(tmpDir, "test", b1.String(), MetaFilename))) - { - // Missing meta.json file. - err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc) - testutil.NotOk(t, err) - testutil.Assert(t, strings.HasSuffix(err.Error(), "/meta.json: no such file or directory"), "") - } - e2eutil.Copy(t, path.Join(tmpDir, b1.String(), MetaFilename), path.Join(tmpDir, "test", b1.String(), MetaFilename)) - { - // Full block. - testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc)) - testutil.Equals(t, 3, len(bkt.Objects())) - testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")])) - testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)])) - testutil.Equals(t, 595, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)])) - - // File stats are gathered. - testutil.Equals(t, fmt.Sprintf(`{ + bkt := objstore.NewInMemBucket() + b1, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ + labels.New(labels.Label{Name: "a", Value: "1"}), + labels.New(labels.Label{Name: "a", Value: "2"}), + labels.New(labels.Label{Name: "a", Value: "3"}), + labels.New(labels.Label{Name: "a", Value: "4"}), + labels.New(labels.Label{Name: "b", Value: "1"}), + }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) + testutil.Ok(t, err) + testutil.Ok(t, os.MkdirAll(path.Join(tmpDir, "test", b1.String()), os.ModePerm)) + + { + // Wrong dir. + err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "not-existing"), metadata.NoneFunc, enableBirthstone) + testutil.NotOk(t, err) + testutil.Assert(t, strings.HasSuffix(err.Error(), "/not-existing: no such file or directory"), "") + } + { + // Wrong existing dir (not a block). + err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test"), metadata.NoneFunc, enableBirthstone) + testutil.NotOk(t, err) + testutil.Equals(t, "not a block dir: ulid: bad data size when unmarshaling", err.Error()) + } + { + // Empty block dir. + err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc, enableBirthstone) + testutil.NotOk(t, err) + testutil.Assert(t, strings.HasSuffix(err.Error(), "/meta.json: no such file or directory"), "") + } + e2eutil.Copy(t, path.Join(tmpDir, b1.String(), MetaFilename), path.Join(tmpDir, "test", b1.String(), MetaFilename)) + { + // Missing chunks. 
+ err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc, enableBirthstone) + testutil.NotOk(t, err) + testutil.Assert(t, strings.HasSuffix(err.Error(), "/chunks: no such file or directory"), err.Error()) + } + testutil.Ok(t, os.MkdirAll(path.Join(tmpDir, "test", b1.String(), ChunksDirname), os.ModePerm)) + e2eutil.Copy(t, path.Join(tmpDir, b1.String(), ChunksDirname, "000001"), path.Join(tmpDir, "test", b1.String(), ChunksDirname, "000001")) + { + // Missing index file. + err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc, enableBirthstone) + testutil.NotOk(t, err) + testutil.Assert(t, strings.HasSuffix(err.Error(), "/index: no such file or directory"), "") + } + e2eutil.Copy(t, path.Join(tmpDir, b1.String(), IndexFilename), path.Join(tmpDir, "test", b1.String(), IndexFilename)) + testutil.Ok(t, os.Remove(path.Join(tmpDir, "test", b1.String(), MetaFilename))) + { + // Missing meta.json file. + err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc, enableBirthstone) + testutil.NotOk(t, err) + testutil.Assert(t, strings.HasSuffix(err.Error(), "/meta.json: no such file or directory"), "") + } + e2eutil.Copy(t, path.Join(tmpDir, b1.String(), MetaFilename), path.Join(tmpDir, "test", b1.String(), MetaFilename)) + { + // Full block. + testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc, enableBirthstone)) + testutil.Equals(t, filesPerBlock, len(bkt.Objects())) + testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")])) + testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)])) + testutil.Equals(t, 595, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)])) + if enableBirthstone { + testutil.Equals(t, 0, len(bkt.Objects()[path.Join(BirthstoneDirname, b1.String())])) + } + + // File stats are gathered. + testutil.Equals(t, fmt.Sprintf(`{ "ulid": "%s", "minTime": 0, "maxTime": 1000, @@ -190,301 +200,364 @@ func TestUpload(t *testing.T) { } } `, b1.String(), b1.String()), string(bkt.Objects()[path.Join(b1.String(), MetaFilename)])) + } + { + // Test Upload is idempotent. + testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc, enableBirthstone)) + testutil.Equals(t, filesPerBlock, len(bkt.Objects())) + testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")])) + testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)])) + testutil.Equals(t, 595, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)])) + if enableBirthstone { + testutil.Equals(t, 0, len(bkt.Objects()[path.Join(BirthstoneDirname, b1.String())])) + } + } + { + // Upload with no external labels should be blocked. 
+ b2, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ + labels.New(labels.Label{Name: "a", Value: "1"}), + labels.New(labels.Label{Name: "a", Value: "2"}), + labels.New(labels.Label{Name: "a", Value: "3"}), + labels.New(labels.Label{Name: "a", Value: "4"}), + labels.New(labels.Label{Name: "b", Value: "1"}), + }, 100, 0, 1000, labels.EmptyLabels(), 124, metadata.NoneFunc) + testutil.Ok(t, err) + err = Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b2.String()), metadata.NoneFunc, enableBirthstone) + testutil.NotOk(t, err) + testutil.Equals(t, "empty external labels are not allowed for Thanos block.", err.Error()) + testutil.Equals(t, filesPerBlock, len(bkt.Objects())) + } + { + // No external labels with UploadPromBlocks. + b2, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ + labels.New(labels.Label{Name: "a", Value: "1"}), + labels.New(labels.Label{Name: "a", Value: "2"}), + labels.New(labels.Label{Name: "a", Value: "3"}), + labels.New(labels.Label{Name: "a", Value: "4"}), + labels.New(labels.Label{Name: "b", Value: "1"}), + }, 100, 0, 1000, labels.EmptyLabels(), 124, metadata.NoneFunc) + testutil.Ok(t, err) + err = UploadPromBlock(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b2.String()), metadata.NoneFunc, enableBirthstone) + testutil.Ok(t, err) + testutil.Equals(t, 2*filesPerBlock, len(bkt.Objects())) + testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b2.String(), ChunksDirname, "000001")])) + testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b2.String(), IndexFilename)])) + testutil.Equals(t, 574, len(bkt.Objects()[path.Join(b2.String(), MetaFilename)])) + if enableBirthstone { + testutil.Equals(t, 0, len(bkt.Objects()[path.Join(BirthstoneDirname, b2.String())])) + } + } } - { - // Test Upload is idempotent. - testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc)) - testutil.Equals(t, 3, len(bkt.Objects())) - testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")])) - testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)])) - testutil.Equals(t, 595, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)])) - } - { - // Upload with no external labels should be blocked. - b2, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ - labels.New(labels.Label{Name: "a", Value: "1"}), - labels.New(labels.Label{Name: "a", Value: "2"}), - labels.New(labels.Label{Name: "a", Value: "3"}), - labels.New(labels.Label{Name: "a", Value: "4"}), - labels.New(labels.Label{Name: "b", Value: "1"}), - }, 100, 0, 1000, labels.EmptyLabels(), 124, metadata.NoneFunc) - testutil.Ok(t, err) - err = Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b2.String()), metadata.NoneFunc) - testutil.NotOk(t, err) - testutil.Equals(t, "empty external labels are not allowed for Thanos block.", err.Error()) - testutil.Equals(t, 3, len(bkt.Objects())) - } - { - // No external labels with UploadPromBlocks. 
- b2, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ - labels.New(labels.Label{Name: "a", Value: "1"}), - labels.New(labels.Label{Name: "a", Value: "2"}), - labels.New(labels.Label{Name: "a", Value: "3"}), - labels.New(labels.Label{Name: "a", Value: "4"}), - labels.New(labels.Label{Name: "b", Value: "1"}), - }, 100, 0, 1000, labels.EmptyLabels(), 124, metadata.NoneFunc) - testutil.Ok(t, err) - err = UploadPromBlock(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b2.String()), metadata.NoneFunc) - testutil.Ok(t, err) - testutil.Equals(t, 6, len(bkt.Objects())) - testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b2.String(), ChunksDirname, "000001")])) - testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b2.String(), IndexFilename)])) - testutil.Equals(t, 574, len(bkt.Objects()[path.Join(b2.String(), MetaFilename)])) - } + + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } func TestDelete(t *testing.T) { - defer custom.TolerantVerifyLeak(t) - ctx := context.Background() + runTest := func(t *testing.T, enableBirthstone bool) { + var filesPerBlock int + if enableBirthstone { + filesPerBlock = 4 + } else { + filesPerBlock = 3 + } + defer custom.TolerantVerifyLeak(t) + ctx := context.Background() - tmpDir := t.TempDir() + tmpDir := t.TempDir() - bkt := objstore.NewInMemBucket() - { - b1, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ - labels.New(labels.Label{Name: "a", Value: "1"}), - labels.New(labels.Label{Name: "a", Value: "2"}), - labels.New(labels.Label{Name: "a", Value: "3"}), - labels.New(labels.Label{Name: "a", Value: "4"}), - labels.New(labels.Label{Name: "b", Value: "1"}), - }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) - testutil.Ok(t, err) - testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b1.String()), metadata.NoneFunc)) - testutil.Equals(t, 3, len(bkt.Objects())) + bkt := objstore.NewInMemBucket() + { + b1, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ + labels.New(labels.Label{Name: "a", Value: "1"}), + labels.New(labels.Label{Name: "a", Value: "2"}), + labels.New(labels.Label{Name: "a", Value: "3"}), + labels.New(labels.Label{Name: "a", Value: "4"}), + labels.New(labels.Label{Name: "b", Value: "1"}), + }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) + testutil.Ok(t, err) + testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b1.String()), metadata.NoneFunc, enableBirthstone)) + testutil.Equals(t, filesPerBlock, len(bkt.Objects())) - markedForDeletion := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"}) - testutil.Ok(t, MarkForDeletion(ctx, log.NewNopLogger(), bkt, b1, "", markedForDeletion)) + markedForDeletion := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"}) + testutil.Ok(t, MarkForDeletion(ctx, log.NewNopLogger(), bkt, b1, "", markedForDeletion)) - // Full delete. 
- testutil.Ok(t, Delete(ctx, log.NewNopLogger(), bkt, b1)) - testutil.Equals(t, 0, len(bkt.Objects())) - } - { - b2, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ - labels.New(labels.Label{Name: "a", Value: "1"}), - labels.New(labels.Label{Name: "a", Value: "2"}), - labels.New(labels.Label{Name: "a", Value: "3"}), - labels.New(labels.Label{Name: "a", Value: "4"}), - labels.New(labels.Label{Name: "b", Value: "1"}), - }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) - testutil.Ok(t, err) - testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b2.String()), metadata.NoneFunc)) - testutil.Equals(t, 3, len(bkt.Objects())) + // Full delete. + testutil.Ok(t, Delete(ctx, log.NewNopLogger(), bkt, b1)) + testutil.Equals(t, 0, len(bkt.Objects())) + } + { + b2, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ + labels.New(labels.Label{Name: "a", Value: "1"}), + labels.New(labels.Label{Name: "a", Value: "2"}), + labels.New(labels.Label{Name: "a", Value: "3"}), + labels.New(labels.Label{Name: "a", Value: "4"}), + labels.New(labels.Label{Name: "b", Value: "1"}), + }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) + testutil.Ok(t, err) + testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b2.String()), metadata.NoneFunc, enableBirthstone)) + testutil.Equals(t, filesPerBlock, len(bkt.Objects())) - // Remove meta.json and check if delete can delete it. - testutil.Ok(t, bkt.Delete(ctx, path.Join(b2.String(), MetaFilename))) - testutil.Ok(t, Delete(ctx, log.NewNopLogger(), bkt, b2)) - testutil.Equals(t, 0, len(bkt.Objects())) + // Remove meta.json and check if delete can delete it. + testutil.Ok(t, bkt.Delete(ctx, path.Join(b2.String(), MetaFilename))) + testutil.Ok(t, Delete(ctx, log.NewNopLogger(), bkt, b2)) + testutil.Equals(t, 0, len(bkt.Objects())) + } } + + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } func TestMarkForDeletion(t *testing.T) { - defer custom.TolerantVerifyLeak(t) - ctx := context.Background() + runTest := func(t *testing.T, enableBirthstone bool) { + defer custom.TolerantVerifyLeak(t) + ctx := context.Background() - tmpDir := t.TempDir() + tmpDir := t.TempDir() - for _, tcase := range []struct { - name string - preUpload func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) + for _, tcase := range []struct { + name string + preUpload func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) - blocksMarked int - }{ - { - name: "block marked for deletion", - preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {}, - blocksMarked: 1, - }, - { - name: "block with deletion mark already, expected log and no metric increment", - preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) { - deletionMark, err := json.Marshal(metadata.DeletionMark{ - ID: id, - DeletionTime: time.Now().Unix(), - Version: metadata.DeletionMarkVersion1, - }) - testutil.Ok(t, err) - testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.DeletionMarkFilename), bytes.NewReader(deletionMark))) + blocksMarked int + }{ + { + name: "block marked for deletion", + preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {}, + blocksMarked: 1, }, - blocksMarked: 0, - }, - } { - t.Run(tcase.name, func(t *testing.T) { - bkt := objstore.NewInMemBucket() - id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ - labels.New(labels.Label{Name: "a", 
Value: "1"}), - labels.New(labels.Label{Name: "a", Value: "2"}), - labels.New(labels.Label{Name: "a", Value: "3"}), - labels.New(labels.Label{Name: "a", Value: "4"}), - labels.New(labels.Label{Name: "b", Value: "1"}), - }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) - testutil.Ok(t, err) + { + name: "block with deletion mark already, expected log and no metric increment", + preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) { + deletionMark, err := json.Marshal(metadata.DeletionMark{ + ID: id, + DeletionTime: time.Now().Unix(), + Version: metadata.DeletionMarkVersion1, + }) + testutil.Ok(t, err) + testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.DeletionMarkFilename), bytes.NewReader(deletionMark))) + }, + blocksMarked: 0, + }, + } { + t.Run(tcase.name, func(t *testing.T) { + bkt := objstore.NewInMemBucket() + id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ + labels.New(labels.Label{Name: "a", Value: "1"}), + labels.New(labels.Label{Name: "a", Value: "2"}), + labels.New(labels.Label{Name: "a", Value: "3"}), + labels.New(labels.Label{Name: "a", Value: "4"}), + labels.New(labels.Label{Name: "b", Value: "1"}), + }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) + testutil.Ok(t, err) - tcase.preUpload(t, id, bkt) + tcase.preUpload(t, id, bkt) - testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, id.String()), metadata.NoneFunc)) + testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, id.String()), metadata.NoneFunc, enableBirthstone)) - c := promauto.With(nil).NewCounter(prometheus.CounterOpts{}) - err = MarkForDeletion(ctx, log.NewNopLogger(), bkt, id, "", c) - testutil.Ok(t, err) - testutil.Equals(t, float64(tcase.blocksMarked), promtest.ToFloat64(c)) - }) + c := promauto.With(nil).NewCounter(prometheus.CounterOpts{}) + err = MarkForDeletion(ctx, log.NewNopLogger(), bkt, id, "", c) + testutil.Ok(t, err) + testutil.Equals(t, float64(tcase.blocksMarked), promtest.ToFloat64(c)) + }) + } } + + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) + } func TestMarkForNoCompact(t *testing.T) { - defer custom.TolerantVerifyLeak(t) - ctx := context.Background() + runTest := func(t *testing.T, enableBirthstone bool) { + defer custom.TolerantVerifyLeak(t) + ctx := context.Background() - tmpDir := t.TempDir() + tmpDir := t.TempDir() - for _, tcase := range []struct { - name string - preUpload func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) + for _, tcase := range []struct { + name string + preUpload func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) - blocksMarked int - }{ - { - name: "block marked", - preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {}, - blocksMarked: 1, - }, - { - name: "block with no-compact mark already, expected log and no metric increment", - preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) { - m, err := json.Marshal(metadata.NoCompactMark{ - ID: id, - NoCompactTime: time.Now().Unix(), - Version: metadata.NoCompactMarkVersion1, - }) - testutil.Ok(t, err) - testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.NoCompactMarkFilename), bytes.NewReader(m))) + blocksMarked int + }{ + { + name: "block marked", + preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {}, + blocksMarked: 1, }, - blocksMarked: 0, - }, - } { - t.Run(tcase.name, func(t *testing.T) { - bkt 
:= objstore.NewInMemBucket() - id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ - labels.New(labels.Label{Name: "a", Value: "1"}), - labels.New(labels.Label{Name: "a", Value: "2"}), - labels.New(labels.Label{Name: "a", Value: "3"}), - labels.New(labels.Label{Name: "a", Value: "4"}), - labels.New(labels.Label{Name: "b", Value: "1"}), - }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) - testutil.Ok(t, err) + { + name: "block with no-compact mark already, expected log and no metric increment", + preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) { + m, err := json.Marshal(metadata.NoCompactMark{ + ID: id, + NoCompactTime: time.Now().Unix(), + Version: metadata.NoCompactMarkVersion1, + }) + testutil.Ok(t, err) + testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.NoCompactMarkFilename), bytes.NewReader(m))) + }, + blocksMarked: 0, + }, + } { + t.Run(tcase.name, func(t *testing.T) { + bkt := objstore.NewInMemBucket() + id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ + labels.New(labels.Label{Name: "a", Value: "1"}), + labels.New(labels.Label{Name: "a", Value: "2"}), + labels.New(labels.Label{Name: "a", Value: "3"}), + labels.New(labels.Label{Name: "a", Value: "4"}), + labels.New(labels.Label{Name: "b", Value: "1"}), + }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) + testutil.Ok(t, err) - tcase.preUpload(t, id, bkt) + tcase.preUpload(t, id, bkt) - testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, id.String()), metadata.NoneFunc)) + testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, id.String()), metadata.NoneFunc, enableBirthstone)) - c := promauto.With(nil).NewCounter(prometheus.CounterOpts{}) - err = MarkForNoCompact(ctx, log.NewNopLogger(), bkt, id, metadata.ManualNoCompactReason, "", c) - testutil.Ok(t, err) - testutil.Equals(t, float64(tcase.blocksMarked), promtest.ToFloat64(c)) - }) + c := promauto.With(nil).NewCounter(prometheus.CounterOpts{}) + err = MarkForNoCompact(ctx, log.NewNopLogger(), bkt, id, metadata.ManualNoCompactReason, "", c) + testutil.Ok(t, err) + testutil.Equals(t, float64(tcase.blocksMarked), promtest.ToFloat64(c)) + }) + } } + + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } func TestMarkForNoDownsample(t *testing.T) { + runTest := func(t *testing.T, enableBirthstone bool) { + defer custom.TolerantVerifyLeak(t) + ctx := context.Background() - defer custom.TolerantVerifyLeak(t) - ctx := context.Background() - - tmpDir := t.TempDir() + tmpDir := t.TempDir() - for _, tcase := range []struct { - name string - preUpload func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) + for _, tcase := range []struct { + name string + preUpload func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) - blocksMarked int - }{ - { - name: "block marked", - preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {}, - blocksMarked: 1, - }, - { - name: "block with no-downsample mark already, expected log and no metric increment", - preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) { - m, err := json.Marshal(metadata.NoDownsampleMark{ - ID: id, - NoDownsampleTime: time.Now().Unix(), - Version: metadata.NoDownsampleMarkVersion1, - }) - testutil.Ok(t, err) - testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.NoDownsampleMarkFilename), bytes.NewReader(m))) + blocksMarked int + }{ + 
{ + name: "block marked", + preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {}, + blocksMarked: 1, }, - blocksMarked: 0, - }, - } { - t.Run(tcase.name, func(t *testing.T) { - bkt := objstore.NewInMemBucket() - id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ - labels.New(labels.Label{Name: "a", Value: "1"}), - labels.New(labels.Label{Name: "a", Value: "2"}), - labels.New(labels.Label{Name: "a", Value: "3"}), - labels.New(labels.Label{Name: "a", Value: "4"}), - labels.New(labels.Label{Name: "b", Value: "1"}), - }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) - testutil.Ok(t, err) + { + name: "block with no-downsample mark already, expected log and no metric increment", + preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) { + m, err := json.Marshal(metadata.NoDownsampleMark{ + ID: id, + NoDownsampleTime: time.Now().Unix(), + Version: metadata.NoDownsampleMarkVersion1, + }) + testutil.Ok(t, err) + testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.NoDownsampleMarkFilename), bytes.NewReader(m))) + }, + blocksMarked: 0, + }, + } { + t.Run(tcase.name, func(t *testing.T) { + bkt := objstore.NewInMemBucket() + id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ + labels.New(labels.Label{Name: "a", Value: "1"}), + labels.New(labels.Label{Name: "a", Value: "2"}), + labels.New(labels.Label{Name: "a", Value: "3"}), + labels.New(labels.Label{Name: "a", Value: "4"}), + labels.New(labels.Label{Name: "b", Value: "1"}), + }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) + testutil.Ok(t, err) - tcase.preUpload(t, id, bkt) + tcase.preUpload(t, id, bkt) - testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, id.String()), metadata.NoneFunc)) + testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, id.String()), metadata.NoneFunc, enableBirthstone)) - c := promauto.With(nil).NewCounter(prometheus.CounterOpts{}) - err = MarkForNoDownsample(ctx, log.NewNopLogger(), bkt, id, metadata.ManualNoDownsampleReason, "", c) - testutil.Ok(t, err) - testutil.Equals(t, float64(tcase.blocksMarked), promtest.ToFloat64(c)) - }) + c := promauto.With(nil).NewCounter(prometheus.CounterOpts{}) + err = MarkForNoDownsample(ctx, log.NewNopLogger(), bkt, id, metadata.ManualNoDownsampleReason, "", c) + testutil.Ok(t, err) + testutil.Equals(t, float64(tcase.blocksMarked), promtest.ToFloat64(c)) + }) + } } + + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } // TestHashDownload uploads an empty block to in-memory storage // and tries to download it to the same dir. It should not try // to download twice. 
func TestHashDownload(t *testing.T) { - defer custom.TolerantVerifyLeak(t) + runTest := func(t *testing.T, enableBirthstone bool) { + var filesPerBlock int + if enableBirthstone { + filesPerBlock = 4 + } else { + filesPerBlock = 3 + } + defer custom.TolerantVerifyLeak(t) - ctx := context.Background() + ctx := context.Background() - tmpDir := t.TempDir() + tmpDir := t.TempDir() - bkt := objstore.NewInMemBucket() - r := prometheus.NewRegistry() - instrumentedBkt := objstore.WrapWithMetrics(bkt, extprom.WrapRegistererWithPrefix("thanos_", r), "test") + bkt := objstore.NewInMemBucket() + r := prometheus.NewRegistry() + instrumentedBkt := objstore.WrapWithMetrics(bkt, extprom.WrapRegistererWithPrefix("thanos_", r), "test") - b1, err := e2eutil.CreateBlockWithTombstone(ctx, tmpDir, []labels.Labels{ - labels.New(labels.Label{Name: "a", Value: "1"}), - }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 42, metadata.SHA256Func) - testutil.Ok(t, err) + b1, err := e2eutil.CreateBlockWithTombstone(ctx, tmpDir, []labels.Labels{ + labels.New(labels.Label{Name: "a", Value: "1"}), + }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 42, metadata.SHA256Func) + testutil.Ok(t, err) - testutil.Ok(t, Upload(ctx, log.NewNopLogger(), instrumentedBkt, path.Join(tmpDir, b1.String()), metadata.SHA256Func)) - testutil.Equals(t, 3, len(bkt.Objects())) + testutil.Ok(t, Upload(ctx, log.NewNopLogger(), instrumentedBkt, path.Join(tmpDir, b1.String()), metadata.SHA256Func, enableBirthstone)) + testutil.Equals(t, filesPerBlock, len(bkt.Objects())) - m, err := DownloadMeta(ctx, log.NewNopLogger(), bkt, b1) - testutil.Ok(t, err) + m, err := DownloadMeta(ctx, log.NewNopLogger(), bkt, b1) + testutil.Ok(t, err) - for _, fl := range m.Thanos.Files { - if fl.RelPath == MetaFilename { - continue + for _, fl := range m.Thanos.Files { + if fl.RelPath == MetaFilename { + continue + } + testutil.Assert(t, fl.Hash != nil, "expected a hash for %s but got nil", fl.RelPath) } - testutil.Assert(t, fl.Hash != nil, "expected a hash for %s but got nil", fl.RelPath) - } - // Remove the hash from one file to check if we always download it. - m.Thanos.Files[1].Hash = nil + // Remove the hash from one file to check if we always download it. + m.Thanos.Files[1].Hash = nil - metaEncoded := strings.Builder{} - testutil.Ok(t, m.Write(&metaEncoded)) - testutil.Ok(t, bkt.Upload(ctx, path.Join(b1.String(), MetaFilename), strings.NewReader(metaEncoded.String()))) + metaEncoded := strings.Builder{} + testutil.Ok(t, m.Write(&metaEncoded)) + testutil.Ok(t, bkt.Upload(ctx, path.Join(b1.String(), MetaFilename), strings.NewReader(metaEncoded.String()))) - // Only downloads MetaFile and IndexFile. - { - err = Download(ctx, log.NewNopLogger(), instrumentedBkt, m.ULID, path.Join(tmpDir, b1.String())) - testutil.Ok(t, err) - testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(` + // Only downloads MetaFile and IndexFile. + { + err = Download(ctx, log.NewNopLogger(), instrumentedBkt, m.ULID, path.Join(tmpDir, b1.String())) + testutil.Ok(t, err) + testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(fmt.Sprintf(` # HELP thanos_objstore_bucket_operations_total Total number of all attempted operations against a bucket. 
# TYPE thanos_objstore_bucket_operations_total counter thanos_objstore_bucket_operations_total{bucket="test",operation="attributes"} 0 @@ -493,16 +566,16 @@ func TestHashDownload(t *testing.T) { thanos_objstore_bucket_operations_total{bucket="test",operation="get"} 2 thanos_objstore_bucket_operations_total{bucket="test",operation="get_range"} 0 thanos_objstore_bucket_operations_total{bucket="test",operation="iter"} 2 - thanos_objstore_bucket_operations_total{bucket="test",operation="upload"} 3 - `), `thanos_objstore_bucket_operations_total`)) - } + thanos_objstore_bucket_operations_total{bucket="test",operation="upload"} %v + `, filesPerBlock)), `thanos_objstore_bucket_operations_total`)) + } - // Ensures that we always download MetaFile. - { - testutil.Ok(t, os.Remove(path.Join(tmpDir, b1.String(), MetaFilename))) - err = Download(ctx, log.NewNopLogger(), instrumentedBkt, m.ULID, path.Join(tmpDir, b1.String())) - testutil.Ok(t, err) - testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(` + // Ensures that we always download MetaFile. + { + testutil.Ok(t, os.Remove(path.Join(tmpDir, b1.String(), MetaFilename))) + err = Download(ctx, log.NewNopLogger(), instrumentedBkt, m.ULID, path.Join(tmpDir, b1.String())) + testutil.Ok(t, err) + testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(fmt.Sprintf(` # HELP thanos_objstore_bucket_operations_total Total number of all attempted operations against a bucket. # TYPE thanos_objstore_bucket_operations_total counter thanos_objstore_bucket_operations_total{bucket="test",operation="attributes"} 0 @@ -511,18 +584,18 @@ func TestHashDownload(t *testing.T) { thanos_objstore_bucket_operations_total{bucket="test",operation="get"} 4 thanos_objstore_bucket_operations_total{bucket="test",operation="get_range"} 0 thanos_objstore_bucket_operations_total{bucket="test",operation="iter"} 4 - thanos_objstore_bucket_operations_total{bucket="test",operation="upload"} 3 - `), `thanos_objstore_bucket_operations_total`)) - } + thanos_objstore_bucket_operations_total{bucket="test",operation="upload"} %v + `, filesPerBlock)), `thanos_objstore_bucket_operations_total`)) + } - // Remove chunks => gets redownloaded. - // Always downloads MetaFile. - // Finally, downloads the IndexFile since we have removed its hash. - { - testutil.Ok(t, os.RemoveAll(path.Join(tmpDir, b1.String(), ChunksDirname))) - err = Download(ctx, log.NewNopLogger(), instrumentedBkt, m.ULID, path.Join(tmpDir, b1.String())) - testutil.Ok(t, err) - testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(` + // Remove chunks => gets redownloaded. + // Always downloads MetaFile. + // Finally, downloads the IndexFile since we have removed its hash. + { + testutil.Ok(t, os.RemoveAll(path.Join(tmpDir, b1.String(), ChunksDirname))) + err = Download(ctx, log.NewNopLogger(), instrumentedBkt, m.ULID, path.Join(tmpDir, b1.String())) + testutil.Ok(t, err) + testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(fmt.Sprintf(` # HELP thanos_objstore_bucket_operations_total Total number of all attempted operations against a bucket. 
# TYPE thanos_objstore_bucket_operations_total counter thanos_objstore_bucket_operations_total{bucket="test",operation="attributes"} 0 @@ -531,52 +604,69 @@ func TestHashDownload(t *testing.T) { thanos_objstore_bucket_operations_total{bucket="test",operation="get"} 7 thanos_objstore_bucket_operations_total{bucket="test",operation="get_range"} 0 thanos_objstore_bucket_operations_total{bucket="test",operation="iter"} 6 - thanos_objstore_bucket_operations_total{bucket="test",operation="upload"} 3 - `), `thanos_objstore_bucket_operations_total`)) + thanos_objstore_bucket_operations_total{bucket="test",operation="upload"} %v + `, filesPerBlock)), `thanos_objstore_bucket_operations_total`)) + } } + + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } func TestUploadCleanup(t *testing.T) { - defer custom.TolerantVerifyLeak(t) + runTest := func(t *testing.T, enableBirthstone bool) { + defer custom.TolerantVerifyLeak(t) - ctx := context.Background() + ctx := context.Background() - tmpDir := t.TempDir() + tmpDir := t.TempDir() - bkt := objstore.NewInMemBucket() - b1, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ - labels.New(labels.Label{Name: "a", Value: "1"}), - labels.New(labels.Label{Name: "a", Value: "2"}), - labels.New(labels.Label{Name: "a", Value: "3"}), - labels.New(labels.Label{Name: "a", Value: "4"}), - labels.New(labels.Label{Name: "b", Value: "1"}), - }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) - testutil.Ok(t, err) - - { - errBkt := errBucket{Bucket: bkt, failSuffix: "/index"} - - uploadErr := Upload(ctx, log.NewNopLogger(), errBkt, path.Join(tmpDir, b1.String()), metadata.NoneFunc) - testutil.Assert(t, errors.Is(uploadErr, errUploadFailed)) - - // If upload of index fails, block is deleted. - testutil.Equals(t, 0, len(bkt.Objects())) - testutil.Assert(t, len(bkt.Objects()[path.Join(DebugMetas, fmt.Sprintf("%s.json", b1.String()))]) == 0) - } + bkt := objstore.NewInMemBucket() + b1, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ + labels.New(labels.Label{Name: "a", Value: "1"}), + labels.New(labels.Label{Name: "a", Value: "2"}), + labels.New(labels.Label{Name: "a", Value: "3"}), + labels.New(labels.Label{Name: "a", Value: "4"}), + labels.New(labels.Label{Name: "b", Value: "1"}), + }, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc) + testutil.Ok(t, err) - { - errBkt := errBucket{Bucket: bkt, failSuffix: "/meta.json"} + { + errBkt := errBucket{Bucket: bkt, failSuffix: "/index"} + + uploadErr := Upload(ctx, log.NewNopLogger(), errBkt, path.Join(tmpDir, b1.String()), metadata.NoneFunc, enableBirthstone) + testutil.Assert(t, errors.Is(uploadErr, errUploadFailed)) + + // If upload of index fails, block is deleted. + testutil.Equals(t, 0, len(bkt.Objects())) + testutil.Assert(t, len(bkt.Objects()[path.Join(DebugMetas, fmt.Sprintf("%s.json", b1.String()))]) == 0) + } - uploadErr := Upload(ctx, log.NewNopLogger(), errBkt, path.Join(tmpDir, b1.String()), metadata.NoneFunc) - testutil.Assert(t, errors.Is(uploadErr, errUploadFailed)) + { + errBkt := errBucket{Bucket: bkt, failSuffix: "/meta.json"} + + uploadErr := Upload(ctx, log.NewNopLogger(), errBkt, path.Join(tmpDir, b1.String()), metadata.NoneFunc, enableBirthstone) + testutil.Assert(t, errors.Is(uploadErr, errUploadFailed)) - // If upload of meta.json fails, nothing is cleaned up. 
- testutil.Equals(t, 3, len(bkt.Objects())) - testutil.Assert(t, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")]) > 0) - testutil.Assert(t, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)]) > 0) - testutil.Assert(t, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)]) > 0) - testutil.Assert(t, len(bkt.Objects()[path.Join(DebugMetas, fmt.Sprintf("%s.json", b1.String()))]) == 0) + // If upload of meta.json fails, nothing is cleaned up. + testutil.Equals(t, 3, len(bkt.Objects())) + testutil.Assert(t, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")]) > 0) + testutil.Assert(t, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)]) > 0) + testutil.Assert(t, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)]) > 0) + testutil.Assert(t, len(bkt.Objects()[path.Join(DebugMetas, fmt.Sprintf("%s.json", b1.String()))]) == 0) + } } + + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } var errUploadFailed = errors.New("upload failed") diff --git a/pkg/block/fetcher.go b/pkg/block/fetcher.go index c93642ca898..1ab144c2ab6 100644 --- a/pkg/block/fetcher.go +++ b/pkg/block/fetcher.go @@ -233,11 +233,12 @@ func NewRecursiveLister(logger log.Logger, bkt objstore.InstrumentedBucketReader } func (f *RecursiveLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<- ulid.ULID) (partialBlocks map[ulid.ULID]bool, err error) { + totalBlocks := 0 if f.logger != nil { level.Info(f.logger).Log("msg", "recursive block lister started") start := time.Now() defer func() { - level.Info(f.logger).Log("msg", "recursive block lister ended", "duration", time.Since(start)) + level.Info(f.logger).Log("msg", "recursive block lister ended", "duration", time.Since(start), "total", totalBlocks, "partial", len(partialBlocks)) }() } partialBlocks = make(map[ulid.ULID]bool) @@ -248,6 +249,7 @@ func (f *RecursiveLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch ch if !ok { return nil } + totalBlocks++ if _, ok := partialBlocks[id]; !ok { partialBlocks[id] = true } @@ -284,11 +286,12 @@ func NewConcurrentLister(logger log.Logger, bkt objstore.InstrumentedBucketReade } func (f *ConcurrentLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<- ulid.ULID) (partialBlocks map[ulid.ULID]bool, err error) { + totalBlocks := 0 if f.logger != nil { level.Info(f.logger).Log("msg", "concurrent block lister started") start := time.Now() defer func() { - level.Info(f.logger).Log("msg", "concurrent block lister end", "duration", time.Since(start)) + level.Info(f.logger).Log("msg", "concurrent block lister ended", "duration", time.Since(start), "total", totalBlocks, "partial", len(partialBlocks)) }() } @@ -339,6 +342,7 @@ func (f *ConcurrentLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch c if !ok { return nil } + totalBlocks++ select { case <-gCtx.Done(): return gCtx.Err() @@ -356,6 +360,82 @@ func (f *ConcurrentLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch c return partialBlocks, nil } +// BirthstoneLister lists block IDs. It determines block completeness by the presence of a birthstone object instead of meta.json.
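+// It runs two concurrent bucket iterations: one over BirthstoneDirname to collect complete blocks, and one over the bucket root to collect all block directories; blocks present in the root listing without a matching birthstone are reported as partial.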
+type BirthstoneLister struct { + logger log.Logger + bkt objstore.InstrumentedBucketReader +} + +func NewBirthstoneLister(logger log.Logger, bkt objstore.InstrumentedBucketReader) *BirthstoneLister { + if logger != nil { + level.Info(logger).Log("msg", "Using birthstone block lister") + } + return &BirthstoneLister{ + logger: logger, + bkt: bkt, + } +} + +func (f *BirthstoneLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<- ulid.ULID) (partialBlocks map[ulid.ULID]bool, err error) { + totalBlocks := 0 + if f.logger != nil { + level.Info(f.logger).Log("msg", "birthstone block lister started") + start := time.Now() + defer func() { + level.Info(f.logger).Log("msg", "birthstone block lister ended", "duration", time.Since(start), "total", totalBlocks, "partial", len(partialBlocks)) + }() + } + partialBlocks = make(map[ulid.ULID]bool) + var ( + eg, gCtx = errgroup.WithContext(ctx) + allBlockIDs = make([]ulid.ULID, 0, 8192) + ) + + eg.Go(func() error { + return f.bkt.Iter(gCtx, BirthstoneDirname, func(name string) error { + id, ok := IsBlockDir(name) + if !ok { + return nil + } + // A block with a birthstone is considered complete. + partialBlocks[id] = false + select { + case <-gCtx.Done(): + return gCtx.Err() + case ch <- id: + } + return nil + }) + }) + eg.Go(func() error { + return f.bkt.Iter(gCtx, "", func(name string) error { + id, ok := IsBlockDir(name) + if !ok { + return nil + } + totalBlocks++ + allBlockIDs = append(allBlockIDs, id) + select { + case <-gCtx.Done(): + return gCtx.Err() + default: + return nil + } + }) + }) + if err := eg.Wait(); err != nil { + return nil, err + } + + // Mark blocks that don't have birthstone entries as partial. + for _, id := range allBlockIDs { + if _, ok := partialBlocks[id]; !ok { + partialBlocks[id] = true + } + } + return partialBlocks, nil +} + type MetadataFetcher interface { Fetch(ctx context.Context) (metas map[ulid.ULID]*metadata.Meta, partial map[ulid.ULID]error, err error) UpdateOnChange(func([]metadata.Meta, error)) diff --git a/pkg/block/fetcher_test.go b/pkg/block/fetcher_test.go index c9d787547cb..f0ceb1f2528 100644 --- a/pkg/block/fetcher_test.go +++ b/pkg/block/fetcher_test.go @@ -14,6 +14,7 @@ import ( "path/filepath" "runtime" "sort" + "strings" "testing" "time" @@ -64,236 +65,278 @@ func ULIDs(is ...int) []ulid.ULID { } func TestMetaFetcher_Fetch(t *testing.T) { - objtesting.ForeachStore(t, func(t *testing.T, bkt objstore.Bucket) { - ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) - defer cancel() - - dir := t.TempDir() - - var ulidToDelete ulid.ULID - r := prometheus.NewRegistry() - noopLogger := log.NewNopLogger() - insBkt := objstore.WithNoopInstr(bkt) - baseBlockIDsFetcher := NewConcurrentLister(noopLogger, insBkt) - baseFetcher, err := NewBaseFetcher(noopLogger, 20, insBkt, baseBlockIDsFetcher, dir, r) - testutil.Ok(t, err) - - fetcher := baseFetcher.NewMetaFetcher(r, []MetadataFilter{ - &ulidFilter{ulidToDelete: &ulidToDelete}, - }, nil) - - for i, tcase := range []struct { - name string - do func() - filterULID ulid.ULID - expectedMetas []ulid.ULID - expectedCorruptedMeta []ulid.ULID - expectedNoMeta []ulid.ULID - expectedFiltered int - expectedMetaErr error - }{ - { - name: "empty bucket", - do: func() {}, - - expectedMetas: ULIDs(), - expectedCorruptedMeta: ULIDs(), - expectedNoMeta: ULIDs(), - }, - { - name: "3 metas in bucket", - do: func() { - var meta metadata.Meta - meta.Version = 1 - meta.ULID = ULID(1) + const recursiveLister = "recursive" + const concurrentLister = "concurrent"
+ const birthstoneLister = "birthstone" + runTest := func(t *testing.T, lister string) { + objtesting.ForeachStore(t, func(t *testing.T, bkt objstore.Bucket) { + ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel() + + dir := t.TempDir() + + var ulidToDelete ulid.ULID + r := prometheus.NewRegistry() + noopLogger := log.NewNopLogger() + insBkt := objstore.WithNoopInstr(bkt) + var baseBlockIDsFetcher Lister + if lister == concurrentLister { + baseBlockIDsFetcher = NewConcurrentLister(noopLogger, insBkt) + } else if lister == recursiveLister { + baseBlockIDsFetcher = NewRecursiveLister(noopLogger, insBkt) + } else if lister == birthstoneLister { + baseBlockIDsFetcher = NewBirthstoneLister(noopLogger, insBkt) + } else { + t.Fatalf("unknown lister %v", lister) + } + baseFetcher, err := NewBaseFetcher(noopLogger, 20, insBkt, baseBlockIDsFetcher, dir, r) + testutil.Ok(t, err) - var buf bytes.Buffer - testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta)) - testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf)) + fetcher := baseFetcher.NewMetaFetcher(r, []MetadataFilter{ + &ulidFilter{ulidToDelete: &ulidToDelete}, + }, nil) + + for i, tcase := range []struct { + name string + do func() + filterULID ulid.ULID + expectedMetas []ulid.ULID + expectedCorruptedMeta []ulid.ULID + expectedNoMeta []ulid.ULID + expectedFiltered int + expectedMetaErr error + }{ + { + name: "empty bucket", + do: func() {}, - meta.ULID = ULID(2) - testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta)) - testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf)) + expectedMetas: ULIDs(), + expectedCorruptedMeta: ULIDs(), + expectedNoMeta: ULIDs(), + }, + { + name: "3 metas in bucket", + do: func() { + var meta metadata.Meta + meta.Version = 1 + meta.ULID = ULID(1) + + var buf bytes.Buffer + testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta)) + testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf)) + if lister == birthstoneLister { + testutil.Ok(t, bkt.Upload(ctx, path.Join(BirthstoneDirname, meta.ULID.String()), strings.NewReader(""))) + } + + meta.ULID = ULID(2) + testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta)) + testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf)) + if lister == birthstoneLister { + testutil.Ok(t, bkt.Upload(ctx, path.Join(BirthstoneDirname, meta.ULID.String()), strings.NewReader(""))) + } + + meta.ULID = ULID(3) + testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta)) + testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf)) + if lister == birthstoneLister { + testutil.Ok(t, bkt.Upload(ctx, path.Join(BirthstoneDirname, meta.ULID.String()), strings.NewReader(""))) + } + }, - meta.ULID = ULID(3) - testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta)) - testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf)) + expectedMetas: ULIDs(1, 2, 3), + expectedCorruptedMeta: ULIDs(), + expectedNoMeta: ULIDs(), }, + { + name: "nothing changed", + do: func() {}, - expectedMetas: ULIDs(1, 2, 3), - expectedCorruptedMeta: ULIDs(), - expectedNoMeta: ULIDs(), - }, - { - name: "nothing changed", - do: func() {}, + expectedMetas: ULIDs(1, 2, 3), + expectedCorruptedMeta: ULIDs(), + expectedNoMeta: ULIDs(), + }, + { + name: "fresh cache", + do: func() { + baseFetcher.cached = map[ulid.ULID]*metadata.Meta{} + }, - expectedMetas: ULIDs(1, 2, 3), - expectedCorruptedMeta: ULIDs(), - 
expectedNoMeta: ULIDs(), - }, - { - name: "fresh cache", - do: func() { - baseFetcher.cached = map[ulid.ULID]*metadata.Meta{} + expectedMetas: ULIDs(1, 2, 3), + expectedCorruptedMeta: ULIDs(), + expectedNoMeta: ULIDs(), }, + { + name: "fresh cache: meta 2 and 3 have corrupted data on disk ", + do: func() { + baseFetcher.cached = map[ulid.ULID]*metadata.Meta{} - expectedMetas: ULIDs(1, 2, 3), - expectedCorruptedMeta: ULIDs(), - expectedNoMeta: ULIDs(), - }, - { - name: "fresh cache: meta 2 and 3 have corrupted data on disk ", - do: func() { - baseFetcher.cached = map[ulid.ULID]*metadata.Meta{} + testutil.Ok(t, os.Remove(filepath.Join(dir, "meta-syncer", ULID(2).String(), MetaFilename))) - testutil.Ok(t, os.Remove(filepath.Join(dir, "meta-syncer", ULID(2).String(), MetaFilename))) + f, err := os.OpenFile(filepath.Join(dir, "meta-syncer", ULID(3).String(), MetaFilename), os.O_WRONLY, os.ModePerm) + testutil.Ok(t, err) - f, err := os.OpenFile(filepath.Join(dir, "meta-syncer", ULID(3).String(), MetaFilename), os.O_WRONLY, os.ModePerm) - testutil.Ok(t, err) + _, err = f.WriteString("{ almost") + testutil.Ok(t, err) + testutil.Ok(t, f.Close()) + }, - _, err = f.WriteString("{ almost") - testutil.Ok(t, err) - testutil.Ok(t, f.Close()) + expectedMetas: ULIDs(1, 2, 3), + expectedCorruptedMeta: ULIDs(), + expectedNoMeta: ULIDs(), }, + { + name: "block without meta", + do: func() { + testutil.Ok(t, bkt.Upload(ctx, path.Join(ULID(4).String(), "some-file"), bytes.NewBuffer([]byte("something")))) + }, - expectedMetas: ULIDs(1, 2, 3), - expectedCorruptedMeta: ULIDs(), - expectedNoMeta: ULIDs(), - }, - { - name: "block without meta", - do: func() { - testutil.Ok(t, bkt.Upload(ctx, path.Join(ULID(4).String(), "some-file"), bytes.NewBuffer([]byte("something")))) + expectedMetas: ULIDs(1, 2, 3), + expectedCorruptedMeta: ULIDs(), + expectedNoMeta: ULIDs(4), }, + { + name: "corrupted meta.json", + do: func() { + testutil.Ok(t, bkt.Upload(ctx, path.Join(ULID(5).String(), MetaFilename), bytes.NewBuffer([]byte("{ not a json")))) + if lister == birthstoneLister { + testutil.Ok(t, bkt.Upload(ctx, path.Join(BirthstoneDirname, ULID(5).String()), strings.NewReader(""))) + } + }, - expectedMetas: ULIDs(1, 2, 3), - expectedCorruptedMeta: ULIDs(), - expectedNoMeta: ULIDs(4), - }, - { - name: "corrupted meta.json", - do: func() { - testutil.Ok(t, bkt.Upload(ctx, path.Join(ULID(5).String(), MetaFilename), bytes.NewBuffer([]byte("{ not a json")))) + expectedMetas: ULIDs(1, 2, 3), + expectedCorruptedMeta: ULIDs(5), + expectedNoMeta: ULIDs(4), }, + { + name: "some added some deleted", + do: func() { + testutil.Ok(t, Delete(ctx, log.NewNopLogger(), bkt, ULID(2))) + + var meta metadata.Meta + meta.Version = 1 + meta.ULID = ULID(6) + + var buf bytes.Buffer + testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta)) + testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf)) + if lister == birthstoneLister { + testutil.Ok(t, bkt.Upload(ctx, path.Join(BirthstoneDirname, meta.ULID.String()), strings.NewReader(""))) + } + }, - expectedMetas: ULIDs(1, 2, 3), - expectedCorruptedMeta: ULIDs(5), - expectedNoMeta: ULIDs(4), - }, - { - name: "some added some deleted", - do: func() { - testutil.Ok(t, Delete(ctx, log.NewNopLogger(), bkt, ULID(2))) - - var meta metadata.Meta - meta.Version = 1 - meta.ULID = ULID(6) - - var buf bytes.Buffer - testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta)) - testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf)) + expectedMetas: ULIDs(1, 3, 
6), + expectedCorruptedMeta: ULIDs(5), + expectedNoMeta: ULIDs(4), }, + { + name: "filter not existing ulid", + do: func() {}, + filterULID: ULID(10), - expectedMetas: ULIDs(1, 3, 6), - expectedCorruptedMeta: ULIDs(5), - expectedNoMeta: ULIDs(4), - }, - { - name: "filter not existing ulid", - do: func() {}, - filterULID: ULID(10), - - expectedMetas: ULIDs(1, 3, 6), - expectedCorruptedMeta: ULIDs(5), - expectedNoMeta: ULIDs(4), - }, - { - name: "filter ulid 1", - do: func() {}, - filterULID: ULID(1), - - expectedMetas: ULIDs(3, 6), - expectedCorruptedMeta: ULIDs(5), - expectedNoMeta: ULIDs(4), - expectedFiltered: 1, - }, - { - name: "error: not supported meta version", - do: func() { - var meta metadata.Meta - meta.Version = 20 - meta.ULID = ULID(7) - - var buf bytes.Buffer - testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta)) - testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf)) - }, - - expectedMetas: ULIDs(1, 3, 6), - expectedCorruptedMeta: ULIDs(5), - expectedNoMeta: ULIDs(4), - expectedMetaErr: errors.New("incomplete view: unexpected meta file: 00000000070000000000000000/meta.json version: 20"), - }, - } { - if ok := t.Run(tcase.name, func(t *testing.T) { - tcase.do() - - ulidToDelete = tcase.filterULID - metas, partial, err := fetcher.Fetch(ctx) - if tcase.expectedMetaErr != nil { - testutil.NotOk(t, err) - testutil.Equals(t, tcase.expectedMetaErr.Error(), err.Error()) - } else { - testutil.Ok(t, err) - } - + expectedMetas: ULIDs(1, 3, 6), + expectedCorruptedMeta: ULIDs(5), + expectedNoMeta: ULIDs(4), + }, { - metasSlice := make([]ulid.ULID, 0, len(metas)) - for id, m := range metas { - testutil.Assert(t, m != nil, "meta is nil") - metasSlice = append(metasSlice, id) + name: "filter ulid 1", + do: func() {}, + filterULID: ULID(1), + + expectedMetas: ULIDs(3, 6), + expectedCorruptedMeta: ULIDs(5), + expectedNoMeta: ULIDs(4), + expectedFiltered: 1, + }, + { + name: "error: not supported meta version", + do: func() { + var meta metadata.Meta + meta.Version = 20 + meta.ULID = ULID(7) + + var buf bytes.Buffer + testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta)) + testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf)) + if lister == birthstoneLister { + testutil.Ok(t, bkt.Upload(ctx, path.Join(BirthstoneDirname, meta.ULID.String()), strings.NewReader(""))) + } + }, + + expectedMetas: ULIDs(1, 3, 6), + expectedCorruptedMeta: ULIDs(5), + expectedNoMeta: ULIDs(4), + expectedMetaErr: errors.New("incomplete view: unexpected meta file: 00000000070000000000000000/meta.json version: 20"), + }, + } { + if ok := t.Run(tcase.name, func(t *testing.T) { + tcase.do() + + ulidToDelete = tcase.filterULID + metas, partial, err := fetcher.Fetch(ctx) + if tcase.expectedMetaErr != nil { + testutil.NotOk(t, err) + testutil.Equals(t, tcase.expectedMetaErr.Error(), err.Error()) + } else { + testutil.Ok(t, err) } - sort.Slice(metasSlice, func(i, j int) bool { - return metasSlice[i].Compare(metasSlice[j]) < 0 - }) - testutil.Equals(t, tcase.expectedMetas, metasSlice) - } - { - partialSlice := make([]ulid.ULID, 0, len(partial)) - for id := range partial { + { + metasSlice := make([]ulid.ULID, 0, len(metas)) + for id, m := range metas { + testutil.Assert(t, m != nil, "meta is nil") + metasSlice = append(metasSlice, id) + } + sort.Slice(metasSlice, func(i, j int) bool { + return metasSlice[i].Compare(metasSlice[j]) < 0 + }) + testutil.Equals(t, tcase.expectedMetas, metasSlice) + } - partialSlice = append(partialSlice, id) + { + partialSlice 
:= make([]ulid.ULID, 0, len(partial)) + for id := range partial { + + partialSlice = append(partialSlice, id) + } + sort.Slice(partialSlice, func(i, j int) bool { + return partialSlice[i].Compare(partialSlice[j]) >= 0 + }) + expected := append([]ulid.ULID{}, tcase.expectedCorruptedMeta...) + expected = append(expected, tcase.expectedNoMeta...) + sort.Slice(expected, func(i, j int) bool { + return expected[i].Compare(expected[j]) >= 0 + }) + testutil.Equals(t, expected, partialSlice) } - sort.Slice(partialSlice, func(i, j int) bool { - return partialSlice[i].Compare(partialSlice[j]) >= 0 - }) - expected := append([]ulid.ULID{}, tcase.expectedCorruptedMeta...) - expected = append(expected, tcase.expectedNoMeta...) - sort.Slice(expected, func(i, j int) bool { - return expected[i].Compare(expected[j]) >= 0 - }) - testutil.Equals(t, expected, partialSlice) - } - expectedFailures := 0 - if tcase.expectedMetaErr != nil { - expectedFailures = 1 + expectedFailures := 0 + if tcase.expectedMetaErr != nil { + expectedFailures = 1 + } + testutil.Equals(t, float64(i+1), promtest.ToFloat64(baseFetcher.metrics.Syncs)) + testutil.Equals(t, float64(i+1), promtest.ToFloat64(fetcher.metrics.Syncs)) + testutil.Equals(t, float64(len(tcase.expectedMetas)), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(LoadedMeta))) + testutil.Equals(t, float64(len(tcase.expectedNoMeta)), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(NoMeta))) + testutil.Equals(t, float64(tcase.expectedFiltered), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues("filtered"))) + testutil.Equals(t, 0.0, promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(labelExcludedMeta))) + testutil.Equals(t, 0.0, promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(timeExcludedMeta))) + testutil.Equals(t, float64(expectedFailures), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(FailedMeta))) + testutil.Equals(t, 0.0, promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(tooFreshMeta))) + }); !ok { + return } - testutil.Equals(t, float64(i+1), promtest.ToFloat64(baseFetcher.metrics.Syncs)) - testutil.Equals(t, float64(i+1), promtest.ToFloat64(fetcher.metrics.Syncs)) - testutil.Equals(t, float64(len(tcase.expectedMetas)), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(LoadedMeta))) - testutil.Equals(t, float64(len(tcase.expectedNoMeta)), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(NoMeta))) - testutil.Equals(t, float64(tcase.expectedFiltered), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues("filtered"))) - testutil.Equals(t, 0.0, promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(labelExcludedMeta))) - testutil.Equals(t, 0.0, promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(timeExcludedMeta))) - testutil.Equals(t, float64(expectedFailures), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(FailedMeta))) - testutil.Equals(t, 0.0, promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(tooFreshMeta))) - }); !ok { - return } - } + }) + } + + t.Run("concurrentLister", func(t *testing.T) { + runTest(t, concurrentLister) + }) + t.Run("recursiveLister", func(t *testing.T) { + runTest(t, recursiveLister) + }) + t.Run("birthstoneLister", func(t *testing.T) { + runTest(t, birthstoneLister) }) } diff --git a/pkg/block/indexheader/header_test.go b/pkg/block/indexheader/header_test.go index b94a857f640..99f7b95a423 100644 --- a/pkg/block/indexheader/header_test.go +++ b/pkg/block/indexheader/header_test.go @@ -63,7 +63,7 @@ func TestReaders(t *testing.T) { }, 
100, 0, 1000, labels.FromStrings("ext1", "1"), 124, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, id1.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, id1.String()), metadata.NoneFunc, false)) // Copy block index version 1 for backward compatibility. /* The block here was produced at the commit @@ -92,7 +92,7 @@ func TestReaders(t *testing.T) { Source: metadata.TestSource, }, &m.BlockMeta) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, m.ULID.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, m.ULID.String()), metadata.NoneFunc, false)) for _, id := range []ulid.ULID{id1, m.ULID} { t.Run(id.String(), func(t *testing.T) { @@ -386,7 +386,7 @@ func prepareIndexV2Block(t testing.TB, tmpDir string, bkt objstore.Bucket) *meta Source: metadata.TestSource, }, &m.BlockMeta) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(context.Background(), log.NewNopLogger(), bkt, filepath.Join(tmpDir, m.ULID.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(context.Background(), log.NewNopLogger(), bkt, filepath.Join(tmpDir, m.ULID.String()), metadata.NoneFunc, false)) return m } @@ -459,7 +459,7 @@ func benchmarkBinaryReaderLookupSymbol(b *testing.B, numSeries int) { // Create a block. id1, err := e2eutil.CreateBlock(ctx, tmpDir, seriesLabels, 100, 0, 1000, labels.FromStrings("ext1", "1"), 124, metadata.NoneFunc) testutil.Ok(b, err) - testutil.Ok(b, block.Upload(ctx, logger, bkt, filepath.Join(tmpDir, id1.String()), metadata.NoneFunc)) + testutil.Ok(b, block.Upload(ctx, logger, bkt, filepath.Join(tmpDir, id1.String()), metadata.NoneFunc, false)) // Create an index reader. 
reader, err := NewBinaryReader(ctx, logger, bkt, tmpDir, id1, postingOffsetsInMemSampling, NewBinaryReaderMetrics(nil)) @@ -594,7 +594,7 @@ func TestReaderPostingsOffsets(t *testing.T) { id, err := e2eutil.CreateBlock(ctx, tmpDir, lbls, 100, 0, 1000, labels.FromStrings("ext1", "1"), 124, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, id.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, id.String()), metadata.NoneFunc, false)) fn := filepath.Join(tmpDir, id.String(), block.IndexHeaderFilename) _, err = WriteBinary(ctx, bkt, id, fn) diff --git a/pkg/block/indexheader/lazy_binary_reader_test.go b/pkg/block/indexheader/lazy_binary_reader_test.go index 73c47f06fe0..f22a6b8e88b 100644 --- a/pkg/block/indexheader/lazy_binary_reader_test.go +++ b/pkg/block/indexheader/lazy_binary_reader_test.go @@ -65,7 +65,7 @@ func TestNewLazyBinaryReader_ShouldBuildIndexHeaderFromBucket(t *testing.T) { labels.FromStrings("a", "2"), }, 100, 0, 1000, labels.FromStrings("ext1", "1"), 124, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc, false)) m := NewLazyBinaryReaderMetrics(nil) bm := NewBinaryReaderMetrics(nil) @@ -116,7 +116,7 @@ func TestNewLazyBinaryReader_ShouldRebuildCorruptedIndexHeader(t *testing.T) { labels.FromStrings("a", "2"), }, 100, 0, 1000, labels.FromStrings("ext1", "1"), 124, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc, false)) // Write a corrupted index-header for the block. 
headerFilename := filepath.Join(tmpDir, blockID.String(), block.IndexHeaderFilename) @@ -159,7 +159,7 @@ func TestLazyBinaryReader_ShouldReopenOnUsageAfterClose(t *testing.T) { labels.FromStrings("a", "2"), }, 100, 0, 1000, labels.FromStrings("ext1", "1"), 124, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc, false)) for _, lazyDownload := range []bool{false, true} { t.Run(fmt.Sprintf("lazyDownload=%v", lazyDownload), func(t *testing.T) { @@ -214,7 +214,7 @@ func TestLazyBinaryReader_unload_ShouldReturnErrorIfNotIdle(t *testing.T) { labels.FromStrings("a", "2"), }, 100, 0, 1000, labels.FromStrings("ext1", "1"), 124, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc, false)) for _, lazyDownload := range []bool{false, true} { t.Run(fmt.Sprintf("lazyDownload=%v", lazyDownload), func(t *testing.T) { @@ -268,7 +268,7 @@ func TestLazyBinaryReader_LoadUnloadRaceCondition(t *testing.T) { labels.FromStrings("a", "2"), }, 100, 0, 1000, labels.FromStrings("ext1", "1"), 124, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc, false)) for _, lazyDownload := range []bool{false, true} { t.Run(fmt.Sprintf("lazyDownload=%v", lazyDownload), func(t *testing.T) { diff --git a/pkg/block/indexheader/reader_pool_test.go b/pkg/block/indexheader/reader_pool_test.go index 331d2187241..6656569673a 100644 --- a/pkg/block/indexheader/reader_pool_test.go +++ b/pkg/block/indexheader/reader_pool_test.go @@ -52,7 +52,7 @@ func TestReaderPool_NewBinaryReader(t *testing.T) { labels.FromStrings("a", "2"), }, 100, 0, 1000, labels.FromStrings("ext1", "1"), 124, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc, false)) meta, err := metadata.ReadFromDir(filepath.Join(tmpDir, blockID.String())) testutil.Ok(t, err) @@ -91,7 +91,7 @@ func TestReaderPool_ShouldCloseIdleLazyReaders(t *testing.T) { labels.FromStrings("a", "2"), }, 100, 0, 1000, labels.FromStrings("ext1", "1"), 124, metadata.NoneFunc) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc, false)) meta, err := metadata.ReadFromDir(filepath.Join(tmpDir, blockID.String())) testutil.Ok(t, err) diff --git a/pkg/compact/compact.go b/pkg/compact/compact.go index a5c98383319..02479dbb0cc 100644 --- a/pkg/compact/compact.go +++ b/pkg/compact/compact.go @@ -282,6 +282,7 @@ type DefaultGrouper struct { hashFunc metadata.HashFunc blockFilesConcurrency int compactBlocksFetchConcurrency int + enableBirthstone bool } // NewDefaultGrouper makes a new DefaultGrouper. 
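// The enableBirthstone argument added below is stored on the grouper and passed through Groups to each new Group, so compacted blocks inherit the setting at upload time.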
@@ -297,6 +298,7 @@ func NewDefaultGrouper( hashFunc metadata.HashFunc, blockFilesConcurrency int, compactBlocksFetchConcurrency int, + enableBirthstone bool, ) *DefaultGrouper { return &DefaultGrouper{ bkt: bkt, @@ -329,6 +331,7 @@ func NewDefaultGrouper( hashFunc: hashFunc, blockFilesConcurrency: blockFilesConcurrency, compactBlocksFetchConcurrency: compactBlocksFetchConcurrency, + enableBirthstone: enableBirthstone, } } @@ -398,6 +401,7 @@ func (g *DefaultGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (res []*Gro g.hashFunc, g.blockFilesConcurrency, g.compactBlocksFetchConcurrency, + g.enableBirthstone, ) if err != nil { return nil, errors.Wrap(err, "create compaction group") @@ -439,6 +443,7 @@ type Group struct { blockFilesConcurrency int compactBlocksFetchConcurrency int extensions any + enableBirthstone bool } // NewGroup returns a new compaction group. @@ -461,6 +466,7 @@ func NewGroup( hashFunc metadata.HashFunc, blockFilesConcurrency int, compactBlocksFetchConcurrency int, + enableBirthstone bool, ) (*Group, error) { if logger == nil { logger = log.NewNopLogger() @@ -489,6 +495,7 @@ func NewGroup( hashFunc: hashFunc, blockFilesConcurrency: blockFilesConcurrency, compactBlocksFetchConcurrency: compactBlocksFetchConcurrency, + enableBirthstone: enableBirthstone, } return g, nil } @@ -1089,7 +1096,7 @@ func (cg *Group) areBlocksOverlapping(include *metadata.Meta, exclude ...*metada } // RepairIssue347 repairs the https://github.com/prometheus/tsdb/issues/347 issue when having issue347Error. -func RepairIssue347(ctx context.Context, logger log.Logger, bkt objstore.Bucket, blocksMarkedForDeletion prometheus.Counter, issue347Err error) error { +func RepairIssue347(ctx context.Context, logger log.Logger, bkt objstore.Bucket, blocksMarkedForDeletion prometheus.Counter, enableBirthstone bool, issue347Err error) error { ie, ok := errors.Cause(issue347Err).(Issue347Error) if !ok { return errors.Errorf("Given error is not an issue347 error: %v", issue347Err) @@ -1129,7 +1136,7 @@ func RepairIssue347(ctx context.Context, logger log.Logger, bkt objstore.Bucket, } level.Info(logger).Log("msg", "uploading repaired block", "newID", resid) - if err = block.Upload(ctx, logger, bkt, filepath.Join(tmpdir, resid.String()), metadata.NoneFunc); err != nil { + if err = block.Upload(ctx, logger, bkt, filepath.Join(tmpdir, resid.String()), metadata.NoneFunc, enableBirthstone); err != nil { return retry(errors.Wrapf(err, "upload of %s failed", resid)) } @@ -1334,7 +1341,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp begin = time.Now() err = tracing.DoInSpanWithErr(ctx, "compaction_block_upload", func(ctx context.Context) error { - return block.Upload(ctx, cg.logger, cg.bkt, bdir, cg.hashFunc, objstore.WithUploadConcurrency(cg.blockFilesConcurrency)) + return block.Upload(ctx, cg.logger, cg.bkt, bdir, cg.hashFunc, cg.enableBirthstone, objstore.WithUploadConcurrency(cg.blockFilesConcurrency)) }) if err != nil { return false, nil, retry(errors.Wrapf(err, "upload of %s failed", compID)) @@ -1394,6 +1401,7 @@ type BucketCompactor struct { bkt objstore.Bucket concurrency int skipBlocksWithOutOfOrderChunks bool + enableBirthstone bool } // NewBucketCompactor creates a new bucket compactor. 
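// Its new trailing enableBirthstone parameter is forwarded to NewBucketCompactorWithCheckerAndCallback and later to RepairIssue347, so repaired blocks are uploaded with the same setting.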
@@ -1407,6 +1415,7 @@ func NewBucketCompactor( bkt objstore.Bucket, concurrency int, skipBlocksWithOutOfOrderChunks bool, + enableBirthstone bool, ) (*BucketCompactor, error) { if concurrency < 0 { return nil, errors.Errorf("invalid concurrency level (%d), concurrency level must be > 0", concurrency) @@ -1423,6 +1432,7 @@ func NewBucketCompactor( bkt, concurrency, skipBlocksWithOutOfOrderChunks, + enableBirthstone, ) } @@ -1438,6 +1448,7 @@ func NewBucketCompactorWithCheckerAndCallback( bkt objstore.Bucket, concurrency int, skipBlocksWithOutOfOrderChunks bool, + enableBirthstone bool, ) (*BucketCompactor, error) { if concurrency < 0 { return nil, errors.Errorf("invalid concurrency level (%d), concurrency level must be > 0", concurrency) @@ -1454,6 +1465,7 @@ func NewBucketCompactorWithCheckerAndCallback( bkt: bkt, concurrency: concurrency, skipBlocksWithOutOfOrderChunks: skipBlocksWithOutOfOrderChunks, + enableBirthstone: enableBirthstone, }, nil } @@ -1506,7 +1518,7 @@ func (c *BucketCompactor) Compact(ctx context.Context, progress *Progress) (rerr } if IsIssue347Error(err) { - if err := RepairIssue347(workCtx, c.logger, c.bkt, c.sy.metrics.BlocksMarkedForDeletion, err); err == nil { + if err := RepairIssue347(workCtx, c.logger, c.bkt, c.sy.metrics.BlocksMarkedForDeletion, c.enableBirthstone, err); err == nil { mtx.Lock() finishedAllGroups = false mtx.Unlock() diff --git a/pkg/compact/compact_e2e_test.go b/pkg/compact/compact_e2e_test.go index 57556fb3e2a..63f45f6b216 100644 --- a/pkg/compact/compact_e2e_test.go +++ b/pkg/compact/compact_e2e_test.go @@ -141,7 +141,7 @@ func TestSyncer_GarbageCollect_e2e(t *testing.T) { testutil.Ok(t, sy.GarbageCollect(ctx)) // Only the level 3 block, the last source block in both resolutions should be left. - grouper := NewDefaultGrouper(nil, bkt, false, false, nil, blocksMarkedForDeletion, garbageCollectedBlocks, blockMarkedForNoCompact, metadata.NoneFunc, 10, 10) + grouper := NewDefaultGrouper(nil, bkt, false, false, nil, blocksMarkedForDeletion, garbageCollectedBlocks, blockMarkedForNoCompact, metadata.NoneFunc, 10, 10, false) groups, err := grouper.Groups(sy.Metas()) testutil.Ok(t, err) @@ -216,8 +216,8 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg testutil.Ok(t, err) planner := NewPlanner(logger, []int64{1000, 3000}, noCompactMarkerFilter) - grouper := NewDefaultGrouper(logger, bkt, false, false, reg, blocksMarkedForDeletion, garbageCollectedBlocks, blocksMaredForNoCompact, metadata.NoneFunc, 10, 10) - bComp, err := NewBucketCompactor(logger, sy, grouper, planner, comp, dir, bkt, 2, true) + grouper := NewDefaultGrouper(logger, bkt, false, false, reg, blocksMarkedForDeletion, garbageCollectedBlocks, blocksMaredForNoCompact, metadata.NoneFunc, 10, 10, false) + bComp, err := NewBucketCompactor(logger, sy, grouper, planner, comp, dir, bkt, 2, true, false) testutil.Ok(t, err) // Compaction on empty should not fail. 
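For reviewers, a minimal sketch (not part of the patch) of the same wiring with birthstone uploads enabled; it mirrors the constructor calls in the test above and assumes the surrounding test variables:

// Same arguments as testGroupCompactE2e, but opting into birthstone uploads
// via the new trailing parameter on both constructors.
grouper := NewDefaultGrouper(logger, bkt, false, false, reg, blocksMarkedForDeletion, garbageCollectedBlocks, blocksMaredForNoCompact, metadata.NoneFunc, 10, 10, true) // enableBirthstone=true
bComp, err := NewBucketCompactor(logger, sy, grouper, planner, comp, dir, bkt, 2, true, true) // skipBlocksWithOutOfOrderChunks=true, enableBirthstone=true
testutil.Ok(t, err)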
@@ -422,7 +422,7 @@ func createAndUpload(t testing.TB, bkt objstore.Bucket, blocks []blockgenSpec) ( for _, b := range blocks { id, meta := createBlock(t, ctx, prepareDir, b) metas = append(metas, meta) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(prepareDir, id.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(prepareDir, id.String()), metadata.NoneFunc, false)) } return metas diff --git a/pkg/compact/compact_test.go b/pkg/compact/compact_test.go index 45057746517..8c0f52f8fe4 100644 --- a/pkg/compact/compact_test.go +++ b/pkg/compact/compact_test.go @@ -226,7 +226,7 @@ func TestRetentionProgressCalculate(t *testing.T) { var bkt objstore.Bucket temp := promauto.With(reg).NewCounter(prometheus.CounterOpts{Name: "test_metric_for_group", Help: "this is a test metric for compact progress tests"}) - grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1, 1) + grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1, 1, false) type retInput struct { meta []*metadata.Meta @@ -371,7 +371,7 @@ func TestCompactProgressCalculate(t *testing.T) { var bkt objstore.Bucket temp := promauto.With(reg).NewCounter(prometheus.CounterOpts{Name: "test_metric_for_group", Help: "this is a test metric for compact progress tests"}) - grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1, 1) + grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1, 1, false) for _, tcase := range []struct { testName string @@ -470,7 +470,7 @@ func TestDownsampleProgressCalculate(t *testing.T) { var bkt objstore.Bucket temp := promauto.With(reg).NewCounter(prometheus.CounterOpts{Name: "test_metric_for_group", Help: "this is a test metric for downsample progress tests"}) - grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1, 1) + grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1, 1, false) for _, tcase := range []struct { testName string diff --git a/pkg/receive/multitsdb.go b/pkg/receive/multitsdb.go index 68be1c96c19..ba6feb0af79 100644 --- a/pkg/receive/multitsdb.go +++ b/pkg/receive/multitsdb.go @@ -67,6 +67,7 @@ type MultiTSDB struct { metricNameFilterEnabled bool matcherConverter *storepb.MatcherConverter + enableBirthstone bool } // MultiTSDBOption is a functional option for MultiTSDB. @@ -79,6 +80,13 @@ func WithMetricNameFilterEnabled() MultiTSDBOption { } } +// WithBirthstoneEnabled enables birthstone during block upload. +func WithBirthstoneEnabled() MultiTSDBOption { + return func(s *MultiTSDB) { + s.enableBirthstone = true + } +} + // WithMatcherConverter enables caching matcher converter consumed by children TSDB Stores. 
func WithMatcherConverter(mc *storepb.MatcherConverter) MultiTSDBOption { return func(s *MultiTSDB) { @@ -744,6 +752,7 @@ func (t *MultiTSDB) startTSDB(logger log.Logger, tenantID string, tenant *tenant t.allowOutOfOrderUpload, t.hashFunc, shipper.DefaultMetaFilename, + t.enableBirthstone, ) } options := []store.TSDBStoreOption{} diff --git a/pkg/receive/multitsdb_test.go b/pkg/receive/multitsdb_test.go index a36db4b402f..590da562f8f 100644 --- a/pkg/receive/multitsdb_test.go +++ b/pkg/receive/multitsdb_test.go @@ -957,7 +957,7 @@ func TestMultiTSDBDoesNotDeleteNotUploadedBlocks(t *testing.T) { Uploaded: []ulid.ULID{mockBlockIDs[0]}, })) - tenant.ship = shipper.New(log.NewNopLogger(), nil, td, nil, nil, metadata.BucketUploadSource, nil, false, metadata.NoneFunc, "") + tenant.ship = shipper.New(log.NewNopLogger(), nil, td, nil, nil, metadata.BucketUploadSource, nil, false, metadata.NoneFunc, "", false) require.Equal(t, map[ulid.ULID]struct{}{ mockBlockIDs[0]: {}, }, tenant.blocksToDelete(nil)) diff --git a/pkg/reloader/reloader_test.go b/pkg/reloader/reloader_test.go index 74629d7122c..f566658d393 100644 --- a/pkg/reloader/reloader_test.go +++ b/pkg/reloader/reloader_test.go @@ -315,6 +315,7 @@ faulty_config: } func TestReloader_ConfigDirApply(t *testing.T) { + t.Skip("flaky on CI. https://github.com/thanos-io/thanos/issues/8114") t.Parallel() l, err := net.Listen("tcp", "localhost:0") @@ -618,6 +619,7 @@ func TestReloader_ConfigDirApply(t *testing.T) { } func TestReloader_ConfigDirApplyBasedOnWatchInterval(t *testing.T) { + t.Skip("flaky on CI. https://github.com/thanos-io/thanos/issues/8114") t.Parallel() l, err := net.Listen("tcp", "localhost:0") @@ -829,6 +831,7 @@ func TestReloader_ConfigDirApplyBasedOnWatchInterval(t *testing.T) { } func TestReloader_DirectoriesApply(t *testing.T) { + t.Skip("flaky on CI.") t.Parallel() l, err := net.Listen("tcp", "localhost:0") diff --git a/pkg/shipper/shipper.go b/pkg/shipper/shipper.go index 6b7be6cd12d..7c855a65e57 100644 --- a/pkg/shipper/shipper.go +++ b/pkg/shipper/shipper.go @@ -78,6 +78,7 @@ type Shipper struct { uploadCompactedFunc func() bool allowOutOfOrderUploads bool + enableBirthstone bool hashFunc metadata.HashFunc labels func() labels.Labels @@ -98,6 +99,7 @@ func New( allowOutOfOrderUploads bool, hashFunc metadata.HashFunc, metaFileName string, + enableBirthstone bool, ) *Shipper { if logger == nil { logger = log.NewNopLogger() @@ -123,6 +125,7 @@ func New( metrics: newMetrics(r), source: source, allowOutOfOrderUploads: allowOutOfOrderUploads, + enableBirthstone: enableBirthstone, uploadCompactedFunc: uploadCompactedFunc, hashFunc: hashFunc, metadataFilePath: filepath.Join(dir, filepath.Clean(metaFileName)), @@ -403,7 +406,7 @@ func (s *Shipper) upload(ctx context.Context, meta *metadata.Meta) error { if err := meta.WriteToDir(s.logger, updir); err != nil { return errors.Wrap(err, "write meta file") } - return block.Upload(ctx, s.logger, s.bucket, updir, s.hashFunc) + return block.Upload(ctx, s.logger, s.bucket, updir, s.hashFunc, s.enableBirthstone) } // blockMetasFromOldest returns the block meta of each block found in dir diff --git a/pkg/shipper/shipper_e2e_test.go b/pkg/shipper/shipper_e2e_test.go index ddb963339c5..da0b2b23077 100644 --- a/pkg/shipper/shipper_e2e_test.go +++ b/pkg/shipper/shipper_e2e_test.go @@ -44,7 +44,7 @@ func TestShipper_SyncBlocks_e2e(t *testing.T) { dir := t.TempDir() extLset := labels.FromStrings("prometheus", "prom-1") - shipper := New(log.NewLogfmtLogger(os.Stderr), nil, dir, metricsBucket, func() 
labels.Labels { return extLset }, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename) + shipper := New(log.NewLogfmtLogger(os.Stderr), nil, dir, metricsBucket, func() labels.Labels { return extLset }, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename, false) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -212,7 +212,7 @@ func TestShipper_SyncBlocksWithMigrating_e2e(t *testing.T) { testutil.Ok(t, p.Restart(context.Background(), logger)) uploadCompactedFunc := func() bool { return true } - shipper := New(log.NewLogfmtLogger(os.Stderr), nil, dir, bkt, func() labels.Labels { return extLset }, metadata.TestSource, uploadCompactedFunc, false, metadata.NoneFunc, DefaultMetaFilename) + shipper := New(log.NewLogfmtLogger(os.Stderr), nil, dir, bkt, func() labels.Labels { return extLset }, metadata.TestSource, uploadCompactedFunc, false, metadata.NoneFunc, DefaultMetaFilename, false) // Create 10 new blocks. 9 of them (non compacted) should be actually uploaded. var ( @@ -361,7 +361,7 @@ func TestShipper_SyncOverlapBlocks_e2e(t *testing.T) { uploadCompactedFunc := func() bool { return true } // Here, the allowOutOfOrderUploads flag is set to true, which allows blocks with overlaps to be uploaded. - shipper := New(log.NewLogfmtLogger(os.Stderr), nil, dir, bkt, func() labels.Labels { return extLset }, metadata.TestSource, uploadCompactedFunc, true, metadata.NoneFunc, DefaultMetaFilename) + shipper := New(log.NewLogfmtLogger(os.Stderr), nil, dir, bkt, func() labels.Labels { return extLset }, metadata.TestSource, uploadCompactedFunc, true, metadata.NoneFunc, DefaultMetaFilename, false) // Creating 2 overlapping blocks - both uploaded when OOO uploads allowed. var ( diff --git a/pkg/shipper/shipper_test.go b/pkg/shipper/shipper_test.go index 1a5cd5c75ac..0eee15f61a4 100644 --- a/pkg/shipper/shipper_test.go +++ b/pkg/shipper/shipper_test.go @@ -29,7 +29,7 @@ import ( func TestShipperTimestamps(t *testing.T) { dir := t.TempDir() - s := New(nil, nil, dir, nil, nil, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename) + s := New(nil, nil, dir, nil, nil, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename, false) // Missing thanos meta file. 
_, _, err := s.Timestamps() @@ -122,7 +122,7 @@ func TestIterBlockMetas(t *testing.T) { }, }.WriteToDir(log.NewNopLogger(), path.Join(dir, id3.String()))) - shipper := New(nil, nil, dir, nil, nil, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename) + shipper := New(nil, nil, dir, nil, nil, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename, false) metas, err := shipper.blockMetasFromOldest() testutil.Ok(t, err) testutil.Equals(t, sort.SliceIsSorted(metas, func(i, j int) bool { @@ -153,7 +153,7 @@ func BenchmarkIterBlockMetas(b *testing.B) { }) b.ResetTimer() - shipper := New(nil, nil, dir, nil, nil, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename) + shipper := New(nil, nil, dir, nil, nil, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename, false) _, err := shipper.blockMetasFromOldest() testutil.Ok(b, err) @@ -165,7 +165,7 @@ func TestShipperAddsSegmentFiles(t *testing.T) { inmemory := objstore.NewInMemBucket() lbls := labels.FromStrings("test", "test") - s := New(nil, nil, dir, inmemory, func() labels.Labels { return lbls }, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename) + s := New(nil, nil, dir, inmemory, func() labels.Labels { return lbls }, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename, false) id := ulid.MustNew(1, nil) blockDir := path.Join(dir, id.String()) @@ -235,7 +235,7 @@ func TestShipperExistingThanosLabels(t *testing.T) { inmemory := objstore.NewInMemBucket() lbls := labels.FromStrings("test", "test") - s := New(nil, nil, dir, inmemory, func() labels.Labels { return lbls }, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename) + s := New(nil, nil, dir, inmemory, func() labels.Labels { return lbls }, metadata.TestSource, nil, false, metadata.NoneFunc, DefaultMetaFilename, false) id := ulid.MustNew(1, nil) id2 := ulid.MustNew(2, nil) diff --git a/pkg/store/acceptance_test.go b/pkg/store/acceptance_test.go index 48fc00adfbe..7aede2c276d 100644 --- a/pkg/store/acceptance_test.go +++ b/pkg/store/acceptance_test.go @@ -974,7 +974,7 @@ func TestBucketStore_Acceptance(t *testing.T) { }, nil) testutil.Ok(tt, err) - testutil.Ok(tt, block.Upload(ctx, logger, bkt, auxBlockDir, metadata.NoneFunc)) + testutil.Ok(tt, block.Upload(ctx, logger, bkt, auxBlockDir, metadata.NoneFunc, false)) } chunkPool, err := NewDefaultChunkBytesPool(2e5) @@ -1119,7 +1119,7 @@ func TestProxyStoreWithTSDBSelector_Acceptance(t *testing.T) { }, nil) testutil.Ok(tt, err) - testutil.Ok(tt, block.Upload(ctx, logger, bkt, auxBlockDir, metadata.NoneFunc)) + testutil.Ok(tt, block.Upload(ctx, logger, bkt, auxBlockDir, metadata.NoneFunc, false)) } chunkPool, err := NewDefaultChunkBytesPool(2e5) diff --git a/pkg/store/bucket_e2e_test.go b/pkg/store/bucket_e2e_test.go index bee87898c6c..37658099cd2 100644 --- a/pkg/store/bucket_e2e_test.go +++ b/pkg/store/bucket_e2e_test.go @@ -121,8 +121,8 @@ func prepareTestBlocks(t testing.TB, now time.Time, count int, dir string, bkt o meta.Thanos.Labels = map[string]string{"ext2": "value2"} testutil.Ok(t, meta.WriteToDir(logger, dir2)) - testutil.Ok(t, block.Upload(ctx, logger, bkt, dir1, metadata.NoneFunc)) - testutil.Ok(t, block.Upload(ctx, logger, bkt, dir2, metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, logger, bkt, dir1, metadata.NoneFunc, false)) + testutil.Ok(t, block.Upload(ctx, logger, bkt, dir2, metadata.NoneFunc, false)) testutil.Ok(t, os.RemoveAll(dir1)) testutil.Ok(t, os.RemoveAll(dir2)) diff --git 
a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index e8dffd093b1..9e1b6e97875 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -663,93 +663,102 @@ func TestBucketStoreConfig_validate(t *testing.T) { func TestBucketStore_TSDBInfo(t *testing.T) { t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - logger := log.NewNopLogger() - dir := t.TempDir() - - bkt := objstore.WithNoopInstr(objstore.NewInMemBucket()) - series := []labels.Labels{labels.FromStrings("a", "1", "b", "1")} + runTest := func(t *testing.T, enableBirthstone bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + dir := t.TempDir() + + bkt := objstore.WithNoopInstr(objstore.NewInMemBucket()) + series := []labels.Labels{labels.FromStrings("a", "1", "b", "1")} + + for _, tt := range []struct { + mint, maxt int64 + extLabels labels.Labels + }{ + {mint: 0, maxt: 1000, extLabels: labels.FromStrings("a", "b")}, + {mint: 1000, maxt: 2000, extLabels: labels.FromStrings("a", "b")}, + {mint: 3000, maxt: 4000, extLabels: labels.FromStrings("a", "b")}, + {mint: 3500, maxt: 5000, extLabels: labels.FromStrings("a", "b")}, + {mint: 0, maxt: 1000, extLabels: labels.FromStrings("a", "c")}, + {mint: 500, maxt: 2000, extLabels: labels.FromStrings("a", "c")}, + {mint: 0, maxt: 1000, extLabels: labels.FromStrings("a", "d")}, + {mint: 2000, maxt: 3000, extLabels: labels.FromStrings("a", "d")}, + } { + id1, err := e2eutil.CreateBlock(ctx, dir, series, 10, tt.mint, tt.maxt, tt.extLabels, 0, metadata.NoneFunc) + testutil.Ok(t, err) + testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id1.String()), metadata.NoneFunc, enableBirthstone)) + } - for _, tt := range []struct { - mint, maxt int64 - extLabels labels.Labels - }{ - {mint: 0, maxt: 1000, extLabels: labels.FromStrings("a", "b")}, - {mint: 1000, maxt: 2000, extLabels: labels.FromStrings("a", "b")}, - {mint: 3000, maxt: 4000, extLabels: labels.FromStrings("a", "b")}, - {mint: 3500, maxt: 5000, extLabels: labels.FromStrings("a", "b")}, - {mint: 0, maxt: 1000, extLabels: labels.FromStrings("a", "c")}, - {mint: 500, maxt: 2000, extLabels: labels.FromStrings("a", "c")}, - {mint: 0, maxt: 1000, extLabels: labels.FromStrings("a", "d")}, - {mint: 2000, maxt: 3000, extLabels: labels.FromStrings("a", "d")}, - } { - id1, err := e2eutil.CreateBlock(ctx, dir, series, 10, tt.mint, tt.maxt, tt.extLabels, 0, metadata.NoneFunc) + baseBlockIDsFetcher := block.NewConcurrentLister(logger, bkt) + metaFetcher, err := block.NewMetaFetcher(logger, 20, bkt, baseBlockIDsFetcher, dir, nil, []block.MetadataFilter{ + block.NewTimePartitionMetaFilter(allowAllFilterConf.MinTime, allowAllFilterConf.MaxTime), + }) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id1.String()), metadata.NoneFunc)) - } - baseBlockIDsFetcher := block.NewConcurrentLister(logger, bkt) - metaFetcher, err := block.NewMetaFetcher(logger, 20, bkt, baseBlockIDsFetcher, dir, nil, []block.MetadataFilter{ - block.NewTimePartitionMetaFilter(allowAllFilterConf.MinTime, allowAllFilterConf.MaxTime), - }) - testutil.Ok(t, err) + chunkPool, err := NewDefaultChunkBytesPool(2e5) + testutil.Ok(t, err) - chunkPool, err := NewDefaultChunkBytesPool(2e5) - testutil.Ok(t, err) + bucketStore, err := NewBucketStore( + objstore.WithNoopInstr(bkt), + metaFetcher, + dir, + NewChunksLimiterFactory(0), + NewSeriesLimiterFactory(0), + NewBytesLimiterFactory(0), + 
NewGapBasedPartitioner(PartitionerMaxGapSize), + 20, + true, + DefaultPostingOffsetInMemorySampling, + false, + false, + 0, + WithChunkPool(chunkPool), + WithFilterConfig(allowAllFilterConf), + ) + testutil.Ok(t, err) + defer func() { testutil.Ok(t, bucketStore.Close()) }() - bucketStore, err := NewBucketStore( - objstore.WithNoopInstr(bkt), - metaFetcher, - dir, - NewChunksLimiterFactory(0), - NewSeriesLimiterFactory(0), - NewBytesLimiterFactory(0), - NewGapBasedPartitioner(PartitionerMaxGapSize), - 20, - true, - DefaultPostingOffsetInMemorySampling, - false, - false, - 0, - WithChunkPool(chunkPool), - WithFilterConfig(allowAllFilterConf), - ) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, bucketStore.Close()) }() + testutil.Ok(t, bucketStore.SyncBlocks(ctx)) + infos := bucketStore.TSDBInfos() + slices.SortFunc(infos, func(a, b infopb.TSDBInfo) int { + return strings.Compare(a.Labels.String(), b.Labels.String()) + }) + testutil.Equals(t, infos, []infopb.TSDBInfo{ + { + Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{{Name: "a", Value: "b"}}}, + MinTime: 0, + MaxTime: 2000, + }, + { + Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{{Name: "a", Value: "b"}}}, + MinTime: 3000, + MaxTime: 5000, + }, + { + Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{{Name: "a", Value: "c"}}}, + MinTime: 0, + MaxTime: 2000, + }, + { + Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{{Name: "a", Value: "d"}}}, + MinTime: 0, + MaxTime: 1000, + }, + { + Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{{Name: "a", Value: "d"}}}, + MinTime: 2000, + MaxTime: 3000, + }, + }) + } - testutil.Ok(t, bucketStore.SyncBlocks(ctx)) - infos := bucketStore.TSDBInfos() - slices.SortFunc(infos, func(a, b infopb.TSDBInfo) int { - return strings.Compare(a.Labels.String(), b.Labels.String()) + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) }) - testutil.Equals(t, infos, []infopb.TSDBInfo{ - { - Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{{Name: "a", Value: "b"}}}, - MinTime: 0, - MaxTime: 2000, - }, - { - Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{{Name: "a", Value: "b"}}}, - MinTime: 3000, - MaxTime: 5000, - }, - { - Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{{Name: "a", Value: "c"}}}, - MinTime: 0, - MaxTime: 2000, - }, - { - Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{{Name: "a", Value: "d"}}}, - MinTime: 0, - MaxTime: 1000, - }, - { - Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{{Name: "a", Value: "d"}}}, - MinTime: 2000, - MaxTime: 3000, - }, + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) }) } @@ -780,40 +789,49 @@ func (r *recorder) GetRange(ctx context.Context, name string, off, length int64) func TestBucketStore_Sharding(t *testing.T) { t.Parallel() - ctx := context.Background() - logger := log.NewNopLogger() + runTest := func(t *testing.T, enableBirthstone bool) { + ctx := context.Background() + logger := log.NewNopLogger() - dir := t.TempDir() + dir := t.TempDir() - bkt := objstore.NewInMemBucket() - series := []labels.Labels{labels.FromStrings("a", "1", "b", "1")} + bkt := objstore.NewInMemBucket() + series := []labels.Labels{labels.FromStrings("a", "1", "b", "1")} - id1, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.FromStrings("cluster", "a", "region", "r1"), 0, metadata.NoneFunc) - testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id1.String()), metadata.NoneFunc)) + id1, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.FromStrings("cluster", "a", 
"region", "r1"), 0, metadata.NoneFunc) + testutil.Ok(t, err) + testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id1.String()), metadata.NoneFunc, enableBirthstone)) - id2, err := e2eutil.CreateBlock(ctx, dir, series, 10, 1000, 2000, labels.FromStrings("cluster", "a", "region", "r1"), 0, metadata.NoneFunc) - testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id2.String()), metadata.NoneFunc)) + id2, err := e2eutil.CreateBlock(ctx, dir, series, 10, 1000, 2000, labels.FromStrings("cluster", "a", "region", "r1"), 0, metadata.NoneFunc) + testutil.Ok(t, err) + testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id2.String()), metadata.NoneFunc, enableBirthstone)) - id3, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.FromStrings("cluster", "b", "region", "r1"), 0, metadata.NoneFunc) - testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id3.String()), metadata.NoneFunc)) + id3, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.FromStrings("cluster", "b", "region", "r1"), 0, metadata.NoneFunc) + testutil.Ok(t, err) + testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id3.String()), metadata.NoneFunc, enableBirthstone)) - id4, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.FromStrings("cluster", "a", "region", "r2"), 0, metadata.NoneFunc) - testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id4.String()), metadata.NoneFunc)) + id4, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.FromStrings("cluster", "a", "region", "r2"), 0, metadata.NoneFunc) + testutil.Ok(t, err) + testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id4.String()), metadata.NoneFunc, enableBirthstone)) - if ok := t.Run("new_runs", func(t *testing.T) { - testSharding(t, "", bkt, id1, id2, id3, id4) - }); !ok { - return - } + if ok := t.Run("new_runs", func(t *testing.T) { + testSharding(t, "", bkt, id1, id2, id3, id4) + }); !ok { + return + } - dir2 := t.TempDir() + dir2 := t.TempDir() + + t.Run("reuse_disk", func(t *testing.T) { + testSharding(t, dir2, bkt, id1, id2, id3, id4) + }) + } - t.Run("reuse_disk", func(t *testing.T) { - testSharding(t, dir2, bkt, id1, id2, id3, id4) + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) }) } @@ -1143,7 +1161,7 @@ func TestBucketIndexReader_ExpandedPostings(t *testing.T) { testutil.Ok(tb, err) defer func() { testutil.Ok(tb, bkt.Close()) }() - id := uploadTestBlock(tb, tmpDir, bkt, 500) + id := uploadTestBlock(tb, tmpDir, bkt, 500, false) r, err := indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, id, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil)) testutil.Ok(tb, err) @@ -1160,14 +1178,14 @@ func BenchmarkBucketIndexReader_ExpandedPostings(b *testing.B) { testutil.Ok(tb, err) defer func() { testutil.Ok(tb, bkt.Close()) }() - id := uploadTestBlock(tb, tmpDir, bkt, 50e5) + id := uploadTestBlock(tb, tmpDir, bkt, 50e5, false) r, err := indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, id, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil)) testutil.Ok(tb, err) benchmarkExpandedPostings(tb, bkt, id, r, 50e5) } -func uploadTestBlock(t testing.TB, tmpDir string, bkt objstore.Bucket, series int) ulid.ULID { +func uploadTestBlock(t testing.TB, tmpDir string, bkt 
objstore.Bucket, series int, enableBirthstone bool) ulid.ULID { headOpts := tsdb.DefaultHeadOptions() headOpts.ChunkDirRoot = tmpDir headOpts.ChunkRange = 1000 @@ -1198,7 +1216,7 @@ func uploadTestBlock(t testing.TB, tmpDir string, bkt objstore.Bucket, series in IndexStats: metadata.IndexStats{SeriesMaxSize: stats.SeriesMaxSize, ChunkMaxSize: stats.ChunkMaxSize}, }, nil) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, logger, bkt, bdir, metadata.NoneFunc)) + testutil.Ok(t, block.Upload(ctx, logger, bkt, bdir, metadata.NoneFunc, enableBirthstone)) return id } @@ -1315,74 +1333,92 @@ func benchmarkExpandedPostings( func TestExpandedPostingsEmptyPostings(t *testing.T) { t.Parallel() - tmpDir := t.TempDir() + runTest := func(t *testing.T, enableBirthstone bool) { + tmpDir := t.TempDir() - bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt")) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, bkt.Close()) }() + bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt")) + testutil.Ok(t, err) + defer func() { testutil.Ok(t, bkt.Close()) }() - id := uploadTestBlock(t, tmpDir, bkt, 100) + id := uploadTestBlock(t, tmpDir, bkt, 100, enableBirthstone) - r, err := indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, id, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil)) - testutil.Ok(t, err) - b := &bucketBlock{ - metrics: newBucketStoreMetrics(nil), - indexHeaderReader: r, - indexCache: noopCache{}, - bkt: bkt, - meta: &metadata.Meta{BlockMeta: tsdb.BlockMeta{ULID: id}}, - partitioner: NewGapBasedPartitioner(PartitionerMaxGapSize), + r, err := indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, id, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil)) + testutil.Ok(t, err) + b := &bucketBlock{ + metrics: newBucketStoreMetrics(nil), + indexHeaderReader: r, + indexCache: noopCache{}, + bkt: bkt, + meta: &metadata.Meta{BlockMeta: tsdb.BlockMeta{ULID: id}}, + partitioner: NewGapBasedPartitioner(PartitionerMaxGapSize), + } + + logger := log.NewNopLogger() + indexr := newBucketIndexReader(b, logger) + matcher1 := labels.MustNewMatcher(labels.MatchEqual, "j", "foo") + // Match nothing. + matcher2 := labels.MustNewMatcher(labels.MatchRegexp, "i", "500.*") + ctx := context.Background() + dummyCounter := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"}) + ps, err := indexr.ExpandedPostings(ctx, newSortedMatchers([]*labels.Matcher{matcher1, matcher2}), NewBytesLimiterFactory(0)(nil), false, dummyCounter, tenancy.DefaultTenant) + testutil.Ok(t, err) + testutil.Equals(t, ps, (*lazyExpandedPostings)(nil)) + // Make sure even if a matcher doesn't match any postings, we still cache empty expanded postings. + testutil.Equals(t, 1, indexr.stats.cachedPostingsCompressions) } - logger := log.NewNopLogger() - indexr := newBucketIndexReader(b, logger) - matcher1 := labels.MustNewMatcher(labels.MatchEqual, "j", "foo") - // Match nothing. 
- matcher2 := labels.MustNewMatcher(labels.MatchRegexp, "i", "500.*") - ctx := context.Background() - dummyCounter := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"}) - ps, err := indexr.ExpandedPostings(ctx, newSortedMatchers([]*labels.Matcher{matcher1, matcher2}), NewBytesLimiterFactory(0)(nil), false, dummyCounter, tenancy.DefaultTenant) - testutil.Ok(t, err) - testutil.Equals(t, ps, (*lazyExpandedPostings)(nil)) - // Make sure even if a matcher doesn't match any postings, we still cache empty expanded postings. - testutil.Equals(t, 1, indexr.stats.cachedPostingsCompressions) + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } func TestLazyExpandedPostingsEmptyPostings(t *testing.T) { t.Parallel() - tmpDir := t.TempDir() + runTest := func(t *testing.T, enableBirthstone bool) { + tmpDir := t.TempDir() - bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt")) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, bkt.Close()) }() + bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt")) + testutil.Ok(t, err) + defer func() { testutil.Ok(t, bkt.Close()) }() - id := uploadTestBlock(t, tmpDir, bkt, 100) + id := uploadTestBlock(t, tmpDir, bkt, 100, enableBirthstone) - r, err := indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, id, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil)) - testutil.Ok(t, err) - b := &bucketBlock{ - metrics: newBucketStoreMetrics(nil), - indexHeaderReader: r, - indexCache: noopCache{}, - bkt: bkt, - meta: &metadata.Meta{BlockMeta: tsdb.BlockMeta{ULID: id}}, - partitioner: NewGapBasedPartitioner(PartitionerMaxGapSize), - estimatedMaxSeriesSize: 20, + r, err := indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, id, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil)) + testutil.Ok(t, err) + b := &bucketBlock{ + metrics: newBucketStoreMetrics(nil), + indexHeaderReader: r, + indexCache: noopCache{}, + bkt: bkt, + meta: &metadata.Meta{BlockMeta: tsdb.BlockMeta{ULID: id}}, + partitioner: NewGapBasedPartitioner(PartitionerMaxGapSize), + estimatedMaxSeriesSize: 20, + } + + logger := log.NewNopLogger() + indexr := newBucketIndexReader(b, logger) + // matcher1 and matcher2 will match nothing after intersection. + matcher1 := labels.MustNewMatcher(labels.MatchEqual, "j", "foo") + matcher2 := labels.MustNewMatcher(labels.MatchRegexp, "n", "1_.*") + matcher3 := labels.MustNewMatcher(labels.MatchRegexp, "i", ".+") + ctx := context.Background() + dummyCounter := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"}) + ps, err := indexr.ExpandedPostings(ctx, newSortedMatchers([]*labels.Matcher{matcher1, matcher2, matcher3}), NewBytesLimiterFactory(0)(nil), true, dummyCounter, tenancy.DefaultTenant) + testutil.Ok(t, err) + // We expect emptyLazyPostings rather than lazy postings with 0 length but with matchers. + testutil.Equals(t, ps, emptyLazyPostings) } - logger := log.NewNopLogger() - indexr := newBucketIndexReader(b, logger) - // matcher1 and matcher2 will match nothing after intersection. 
- matcher1 := labels.MustNewMatcher(labels.MatchEqual, "j", "foo") - matcher2 := labels.MustNewMatcher(labels.MatchRegexp, "n", "1_.*") - matcher3 := labels.MustNewMatcher(labels.MatchRegexp, "i", ".+") - ctx := context.Background() - dummyCounter := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"}) - ps, err := indexr.ExpandedPostings(ctx, newSortedMatchers([]*labels.Matcher{matcher1, matcher2, matcher3}), NewBytesLimiterFactory(0)(nil), true, dummyCounter, tenancy.DefaultTenant) - testutil.Ok(t, err) - // We expect emptyLazyPostings rather than lazy postings with 0 length but with matchers. - testutil.Equals(t, ps, emptyLazyPostings) + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } func TestBucketSeries(t *testing.T) { @@ -1515,7 +1551,7 @@ func benchBucketSeries(t testutil.TB, sampleType chunkenc.ValueType, skipChunk, testutil.Ok(t, err) testutil.Ok(t, meta.WriteToDir(logger, blockIDDir)) - testutil.Ok(t, block.Upload(context.Background(), logger, bkt, blockIDDir, metadata.NoneFunc)) + testutil.Ok(t, block.Upload(context.Background(), logger, bkt, blockIDDir, metadata.NoneFunc, false)) } ibkt := objstore.WithNoopInstr(bkt) @@ -1654,186 +1690,195 @@ func (m *mockedPool) Put(b *[]byte) { func TestBucketSeries_OneBlock_InMemIndexCacheSegfault(t *testing.T) { t.Parallel() - tmpDir := t.TempDir() + runTest := func(t *testing.T, enableBirthstone bool) { + tmpDir := t.TempDir() - bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt")) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, bkt.Close()) }() + bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt")) + testutil.Ok(t, err) + defer func() { testutil.Ok(t, bkt.Close()) }() - logger := log.NewLogfmtLogger(os.Stderr) - thanosMeta := metadata.Thanos{ - Labels: labels.FromStrings("ext1", "1").Map(), - Downsample: metadata.ThanosDownsample{Resolution: 0}, - Source: metadata.TestSource, - } + logger := log.NewLogfmtLogger(os.Stderr) + thanosMeta := metadata.Thanos{ + Labels: labels.FromStrings("ext1", "1").Map(), + Downsample: metadata.ThanosDownsample{Resolution: 0}, + Source: metadata.TestSource, + } - chunkPool, err := pool.NewBucketedPool[byte](chunkBytesPoolMinSize, chunkBytesPoolMaxSize, 2, 100e7) - testutil.Ok(t, err) + chunkPool, err := pool.NewBucketedPool[byte](chunkBytesPoolMinSize, chunkBytesPoolMaxSize, 2, 100e7) + testutil.Ok(t, err) - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{ - MaxItemSize: 3000, - // This is the exact size of cache needed for our *single request*. - // This is limited in order to make sure we test evictions. - MaxSize: 8889, - }) - testutil.Ok(t, err) + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{ + MaxItemSize: 3000, + // This is the exact size of cache needed for our *single request*. + // This is limited in order to make sure we test evictions. + MaxSize: 8889, + }) + testutil.Ok(t, err) - var b1 *bucketBlock + var b1 *bucketBlock - const numSeries = 100 - headOpts := tsdb.DefaultHeadOptions() - headOpts.ChunkDirRoot = tmpDir - headOpts.ChunkRange = 1 + const numSeries = 100 + headOpts := tsdb.DefaultHeadOptions() + headOpts.ChunkDirRoot = tmpDir + headOpts.ChunkRange = 1 - // Create 4 blocks. Each will have numSeriesPerBlock number of series that have 1 sample only. 
- // Timestamp will be counted for each new series, so each series will have unique timestamp.
- // This allows to pick time range that will correspond to number of series picked 1:1.
- {
- // Block 1.
- h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil)
- testutil.Ok(t, err)
- defer func() { testutil.Ok(t, h.Close()) }()
+ // Create 4 blocks. Each will have numSeriesPerBlock number of series that have 1 sample only.
+ // The timestamp is incremented for each new series, so each series has a unique timestamp.
+ // This allows picking a time range that corresponds 1:1 to the number of series picked.
+ {
+ // Block 1.
+ h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil)
+ testutil.Ok(t, err)
+ defer func() { testutil.Ok(t, h.Close()) }()
- app := h.Appender(context.Background())
+ app := h.Appender(context.Background())
+
+ for i := 0; i < numSeries; i++ {
+ ts := int64(i)
+ lbls := labels.FromStrings("foo", "bar", "b", "1", "i", fmt.Sprintf("%07d%s", ts, storetestutil.LabelLongSuffix))
+
+ _, err := app.Append(0, lbls, ts, 0)
+ testutil.Ok(t, err)
+ }
+ testutil.Ok(t, app.Commit())
- for i := 0; i < numSeries; i++ {
- ts := int64(i)
- lbls := labels.FromStrings("foo", "bar", "b", "1", "i", fmt.Sprintf("%07d%s", ts, storetestutil.LabelLongSuffix))
+ blockDir := filepath.Join(tmpDir, "tmp")
+ id := storetestutil.CreateBlockFromHead(t, blockDir, h)
- _, err := app.Append(0, lbls, ts, 0)
+ meta, err := metadata.InjectThanos(log.NewNopLogger(), filepath.Join(blockDir, id.String()), thanosMeta, nil)
+ testutil.Ok(t, err)
+ testutil.Ok(t, block.Upload(context.Background(), logger, bkt, filepath.Join(blockDir, id.String()), metadata.NoneFunc, enableBirthstone))
+
+ b1 = &bucketBlock{
+ indexCache: indexCache,
+ metrics: newBucketStoreMetrics(nil),
+ bkt: bkt,
+ meta: meta,
+ partitioner: NewGapBasedPartitioner(PartitionerMaxGapSize),
+ chunkObjs: []string{filepath.Join(id.String(), "chunks", "000001")},
+ chunkPool: chunkPool,
+ estimatedMaxSeriesSize: EstimatedMaxSeriesSize,
+ estimatedMaxChunkSize: EstimatedMaxChunkSize,
+ }
+ b1.indexHeaderReader, err = indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, b1.meta.ULID, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil))
testutil.Ok(t, err)
}
- testutil.Ok(t, app.Commit())
- blockDir := filepath.Join(tmpDir, "tmp")
- id := storetestutil.CreateBlockFromHead(t, blockDir, h)
-
- meta, err := metadata.InjectThanos(log.NewNopLogger(), filepath.Join(blockDir, id.String()), thanosMeta, nil)
- testutil.Ok(t, err)
- testutil.Ok(t, block.Upload(context.Background(), logger, bkt, filepath.Join(blockDir, id.String()), metadata.NoneFunc))
+ var b2 *bucketBlock
+ {
+ // Block 2, do not load this block yet.
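+ // It is built and uploaded the same way as block 1, again passing the enableBirthstone flag under test.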
+ h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) + testutil.Ok(t, err) + defer func() { testutil.Ok(t, h.Close()) }() - b1 = &bucketBlock{ - indexCache: indexCache, - metrics: newBucketStoreMetrics(nil), - bkt: bkt, - meta: meta, - partitioner: NewGapBasedPartitioner(PartitionerMaxGapSize), - chunkObjs: []string{filepath.Join(id.String(), "chunks", "000001")}, - chunkPool: chunkPool, - estimatedMaxSeriesSize: EstimatedMaxSeriesSize, - estimatedMaxChunkSize: EstimatedMaxChunkSize, - } - b1.indexHeaderReader, err = indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, b1.meta.ULID, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil)) - testutil.Ok(t, err) - } + app := h.Appender(context.Background()) - var b2 *bucketBlock - { - // Block 2, do not load this block yet. - h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, h.Close()) }() + for i := 0; i < numSeries; i++ { + ts := int64(i) + lbls := labels.FromStrings("foo", "bar", "b", "2", "i", fmt.Sprintf("%07d%s", ts, storetestutil.LabelLongSuffix)) - app := h.Appender(context.Background()) + _, err := app.Append(0, lbls, ts, 0) + testutil.Ok(t, err) + } + testutil.Ok(t, app.Commit()) - for i := 0; i < numSeries; i++ { - ts := int64(i) - lbls := labels.FromStrings("foo", "bar", "b", "2", "i", fmt.Sprintf("%07d%s", ts, storetestutil.LabelLongSuffix)) + blockDir := filepath.Join(tmpDir, "tmp2") + id := storetestutil.CreateBlockFromHead(t, blockDir, h) - _, err := app.Append(0, lbls, ts, 0) + meta, err := metadata.InjectThanos(log.NewNopLogger(), filepath.Join(blockDir, id.String()), thanosMeta, nil) + testutil.Ok(t, err) + testutil.Ok(t, block.Upload(context.Background(), logger, bkt, filepath.Join(blockDir, id.String()), metadata.NoneFunc, enableBirthstone)) + + b2 = &bucketBlock{ + indexCache: indexCache, + metrics: newBucketStoreMetrics(nil), + bkt: bkt, + meta: meta, + partitioner: NewGapBasedPartitioner(PartitionerMaxGapSize), + chunkObjs: []string{filepath.Join(id.String(), "chunks", "000001")}, + chunkPool: chunkPool, + estimatedMaxSeriesSize: EstimatedMaxSeriesSize, + estimatedMaxChunkSize: EstimatedMaxChunkSize, + } + b2.indexHeaderReader, err = indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, b2.meta.ULID, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil)) testutil.Ok(t, err) } - testutil.Ok(t, app.Commit()) - - blockDir := filepath.Join(tmpDir, "tmp2") - id := storetestutil.CreateBlockFromHead(t, blockDir, h) - - meta, err := metadata.InjectThanos(log.NewNopLogger(), filepath.Join(blockDir, id.String()), thanosMeta, nil) - testutil.Ok(t, err) - testutil.Ok(t, block.Upload(context.Background(), logger, bkt, filepath.Join(blockDir, id.String()), metadata.NoneFunc)) - b2 = &bucketBlock{ - indexCache: indexCache, - metrics: newBucketStoreMetrics(nil), - bkt: bkt, - meta: meta, - partitioner: NewGapBasedPartitioner(PartitionerMaxGapSize), - chunkObjs: []string{filepath.Join(id.String(), "chunks", "000001")}, - chunkPool: chunkPool, - estimatedMaxSeriesSize: EstimatedMaxSeriesSize, - estimatedMaxChunkSize: EstimatedMaxChunkSize, + store := &BucketStore{ + bkt: objstore.WithNoopInstr(bkt), + logger: logger, + indexCache: indexCache, + indexReaderPool: indexheader.NewReaderPool(log.NewNopLogger(), false, 0, indexheader.NewReaderPoolMetrics(nil), indexheader.AlwaysEagerDownloadIndexHeader), + metrics: newBucketStoreMetrics(nil), + blockSets: 
map[uint64]*bucketBlockSet{
+ labels.FromStrings("ext1", "1").Hash(): {blocks: [][]*bucketBlock{{b1, b2}}},
+ },
+ blocks: map[ulid.ULID]*bucketBlock{
+ b1.meta.ULID: b1,
+ b2.meta.ULID: b2,
+ },
+ queryGate: gate.NewNoop(),
+ chunksLimiterFactory: NewChunksLimiterFactory(0),
+ seriesLimiterFactory: NewSeriesLimiterFactory(0),
+ bytesLimiterFactory: NewBytesLimiterFactory(0),
+ seriesBatchSize: SeriesBatchSize,
+ requestLoggerFunc: NoopRequestLoggerFunc,
}
- b2.indexHeaderReader, err = indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, b2.meta.ULID, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil))
- testutil.Ok(t, err)
- }
- store := &BucketStore{
- bkt: objstore.WithNoopInstr(bkt),
- logger: logger,
- indexCache: indexCache,
- indexReaderPool: indexheader.NewReaderPool(log.NewNopLogger(), false, 0, indexheader.NewReaderPoolMetrics(nil), indexheader.AlwaysEagerDownloadIndexHeader),
- metrics: newBucketStoreMetrics(nil),
- blockSets: map[uint64]*bucketBlockSet{
- labels.FromStrings("ext1", "1").Hash(): {blocks: [][]*bucketBlock{{b1, b2}}},
- },
- blocks: map[ulid.ULID]*bucketBlock{
- b1.meta.ULID: b1,
- b2.meta.ULID: b2,
- },
- queryGate: gate.NewNoop(),
- chunksLimiterFactory: NewChunksLimiterFactory(0),
- seriesLimiterFactory: NewSeriesLimiterFactory(0),
- bytesLimiterFactory: NewBytesLimiterFactory(0),
- seriesBatchSize: SeriesBatchSize,
- requestLoggerFunc: NoopRequestLoggerFunc,
+ t.Run("invoke series for one block. Fill the cache on the way.", func(t *testing.T) {
+ srv := newStoreSeriesServer(context.Background())
+ testutil.Ok(t, store.Series(&storepb.SeriesRequest{
+ MinTime: 0,
+ MaxTime: int64(numSeries) - 1,
+ Matchers: []storepb.LabelMatcher{
+ {Type: storepb.LabelMatcher_EQ, Name: "foo", Value: "bar"},
+ {Type: storepb.LabelMatcher_EQ, Name: "b", Value: "1"},
+ // This bug shows only when we use lots of symbols for matching.
+ {Type: storepb.LabelMatcher_NEQ, Name: "i", Value: ""},
+ },
+ }, srv))
+ testutil.Equals(t, 0, len(srv.Warnings))
+ testutil.Equals(t, numSeries, len(srv.SeriesSet))
+ })
+ t.Run("invoke series for second block. This should revoke previous cache.", func(t *testing.T) {
+ srv := newStoreSeriesServer(context.Background())
+ testutil.Ok(t, store.Series(&storepb.SeriesRequest{
+ MinTime: 0,
+ MaxTime: int64(numSeries) - 1,
+ Matchers: []storepb.LabelMatcher{
+ {Type: storepb.LabelMatcher_EQ, Name: "foo", Value: "bar"},
+ {Type: storepb.LabelMatcher_EQ, Name: "b", Value: "2"},
+ // This bug shows only when we use lots of symbols for matching.
+ {Type: storepb.LabelMatcher_NEQ, Name: "i", Value: ""},
+ },
+ }, srv))
+ testutil.Equals(t, 0, len(srv.Warnings))
+ testutil.Equals(t, numSeries, len(srv.SeriesSet))
+ })
+ t.Run("remove second block. Cache stays. Ask for first again.", func(t *testing.T) {
+ testutil.Ok(t, store.removeBlock(b2.meta.ULID))
+
+ srv := newStoreSeriesServer(context.Background())
+ testutil.Ok(t, store.Series(&storepb.SeriesRequest{
+ MinTime: 0,
+ MaxTime: int64(numSeries) - 1,
+ Matchers: []storepb.LabelMatcher{
+ {Type: storepb.LabelMatcher_EQ, Name: "foo", Value: "bar"},
+ {Type: storepb.LabelMatcher_EQ, Name: "b", Value: "1"},
+ // This bug shows only when we use lots of symbols for matching.
+ {Type: storepb.LabelMatcher_NEQ, Name: "i", Value: ""},
+ },
+ }, srv))
+ testutil.Equals(t, 0, len(srv.Warnings))
+ testutil.Equals(t, numSeries, len(srv.SeriesSet))
+ })
}
- t.Run("invoke series for one block. Fill the cache on the way.", func(t *testing.T) {
- srv := newStoreSeriesServer(context.Background())
- testutil.Ok(t, store.Series(&storepb.SeriesRequest{
- MinTime: 0,
- MaxTime: int64(numSeries) - 1,
- Matchers: []storepb.LabelMatcher{
- {Type: storepb.LabelMatcher_EQ, Name: "foo", Value: "bar"},
- {Type: storepb.LabelMatcher_EQ, Name: "b", Value: "1"},
- // This bug shows only when we use lot's of symbols for matching.
- {Type: storepb.LabelMatcher_NEQ, Name: "i", Value: ""},
- },
- }, srv))
- testutil.Equals(t, 0, len(srv.Warnings))
- testutil.Equals(t, numSeries, len(srv.SeriesSet))
- })
- t.Run("invoke series for second block. This should revoke previous cache.", func(t *testing.T) {
- srv := newStoreSeriesServer(context.Background())
- testutil.Ok(t, store.Series(&storepb.SeriesRequest{
- MinTime: 0,
- MaxTime: int64(numSeries) - 1,
- Matchers: []storepb.LabelMatcher{
- {Type: storepb.LabelMatcher_EQ, Name: "foo", Value: "bar"},
- {Type: storepb.LabelMatcher_EQ, Name: "b", Value: "2"},
- // This bug shows only when we use lot's of symbols for matching.
- {Type: storepb.LabelMatcher_NEQ, Name: "i", Value: ""},
- },
- }, srv))
- testutil.Equals(t, 0, len(srv.Warnings))
- testutil.Equals(t, numSeries, len(srv.SeriesSet))
+ t.Run("enableBirthstone", func(t *testing.T) {
+ runTest(t, true)
})
- t.Run("remove second block. Cache stays. Ask for first again.", func(t *testing.T) {
- testutil.Ok(t, store.removeBlock(b2.meta.ULID))
-
- srv := newStoreSeriesServer(context.Background())
- testutil.Ok(t, store.Series(&storepb.SeriesRequest{
- MinTime: 0,
- MaxTime: int64(numSeries) - 1,
- Matchers: []storepb.LabelMatcher{
- {Type: storepb.LabelMatcher_EQ, Name: "foo", Value: "bar"},
- {Type: storepb.LabelMatcher_EQ, Name: "b", Value: "1"},
- // This bug shows only when we use lot's of symbols for matching.
- {Type: storepb.LabelMatcher_NEQ, Name: "i", Value: ""},
- },
- }, srv))
- testutil.Equals(t, 0, len(srv.Warnings))
- testutil.Equals(t, numSeries, len(srv.SeriesSet))
+ t.Run("disableBirthstone", func(t *testing.T) {
+ runTest(t, false)
})
}

@@ -2022,266 +2067,284 @@ func TestSeries_ErrorUnmarshallingRequestHints(t *testing.T) {
func TestSeries_BlockWithMultipleChunks(t *testing.T) {
t.Parallel()
- tb := testutil.NewTB(t)
-
- tmpDir := t.TempDir()
+ runTest := func(t *testing.T, enableBirthstone bool) {
+ tb := testutil.NewTB(t)
- // Create a block with 1 series but an high number of samples,
- // so that they will span across multiple chunks.
- headOpts := tsdb.DefaultHeadOptions()
- headOpts.ChunkDirRoot = filepath.Join(tmpDir, "block")
- headOpts.ChunkRange = 10000000000
+ tmpDir := t.TempDir()
- h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil)
- testutil.Ok(t, err)
- defer func() { testutil.Ok(t, h.Close()) }()
+ // Create a block with 1 series but a high number of samples,
+ // so that they will span across multiple chunks.
+ headOpts := tsdb.DefaultHeadOptions()
+ headOpts.ChunkDirRoot = filepath.Join(tmpDir, "block")
+ headOpts.ChunkRange = 10000000000
- series := labels.FromStrings("__name__", "test")
- for ts := int64(0); ts < 10000; ts++ {
- // Appending a single sample is very unoptimised, but guarantees each chunk is always MaxSamplesPerChunk
- // (except the last one, which could be smaller).
- app := h.Appender(context.Background()) - _, err := app.Append(0, series, ts, float64(ts)) + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) testutil.Ok(t, err) - testutil.Ok(t, app.Commit()) - } - - blk := storetestutil.CreateBlockFromHead(t, headOpts.ChunkDirRoot, h) - - thanosMeta := metadata.Thanos{ - Labels: labels.FromStrings("ext1", "1").Map(), - Downsample: metadata.ThanosDownsample{Resolution: 0}, - Source: metadata.TestSource, - } + defer func() { testutil.Ok(t, h.Close()) }() - _, err = metadata.InjectThanos(log.NewNopLogger(), filepath.Join(headOpts.ChunkDirRoot, blk.String()), thanosMeta, nil) - testutil.Ok(t, err) + series := labels.FromStrings("__name__", "test") + for ts := int64(0); ts < 10000; ts++ { + // Appending a single sample is very unoptimised, but guarantees each chunk is always MaxSamplesPerChunk + // (except the last one, which could be smaller). + app := h.Appender(context.Background()) + _, err := app.Append(0, series, ts, float64(ts)) + testutil.Ok(t, err) + testutil.Ok(t, app.Commit()) + } - // Create a bucket and upload the block there. - bktDir := filepath.Join(tmpDir, "bucket") - bkt, err := filesystem.NewBucket(bktDir) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, bkt.Close()) }() + blk := storetestutil.CreateBlockFromHead(t, headOpts.ChunkDirRoot, h) - instrBkt := objstore.WithNoopInstr(bkt) - logger := log.NewNopLogger() - testutil.Ok(t, block.Upload(context.Background(), logger, bkt, filepath.Join(headOpts.ChunkDirRoot, blk.String()), metadata.NoneFunc)) + thanosMeta := metadata.Thanos{ + Labels: labels.FromStrings("ext1", "1").Map(), + Downsample: metadata.ThanosDownsample{Resolution: 0}, + Source: metadata.TestSource, + } - // Instance a real bucket store we'll use to query the series. - baseBlockIDsFetcher := block.NewConcurrentLister(logger, instrBkt) - fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, baseBlockIDsFetcher, tmpDir, nil, nil) - testutil.Ok(tb, err) + _, err = metadata.InjectThanos(log.NewNopLogger(), filepath.Join(headOpts.ChunkDirRoot, blk.String()), thanosMeta, nil) + testutil.Ok(t, err) - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{}) - testutil.Ok(tb, err) - - store, err := NewBucketStore( - instrBkt, - fetcher, - tmpDir, - NewChunksLimiterFactory(100000/MaxSamplesPerChunk), - NewSeriesLimiterFactory(0), - NewBytesLimiterFactory(0), - NewGapBasedPartitioner(PartitionerMaxGapSize), - 10, - false, - DefaultPostingOffsetInMemorySampling, - true, - false, - 0, - WithLogger(logger), - WithIndexCache(indexCache), - ) - testutil.Ok(tb, err) - testutil.Ok(tb, store.SyncBlocks(context.Background())) - - tests := map[string]struct { - reqMinTime int64 - reqMaxTime int64 - expectedSamples int - }{ - "query the entire block": { - reqMinTime: math.MinInt64, - reqMaxTime: math.MaxInt64, - expectedSamples: 10000, - }, - "query the beginning of the block": { - reqMinTime: 0, - reqMaxTime: 100, - expectedSamples: MaxSamplesPerChunk, - }, - "query the middle of the block": { - reqMinTime: 4000, - reqMaxTime: 4050, - expectedSamples: MaxSamplesPerChunk, - }, - "query the end of the block": { - reqMinTime: 9800, - reqMaxTime: 10000, - expectedSamples: (MaxSamplesPerChunk * 2) + (10000 % MaxSamplesPerChunk), - }, - } - - for testName, testData := range tests { - t.Run(testName, func(t *testing.T) { - req := &storepb.SeriesRequest{ - MinTime: testData.reqMinTime, - MaxTime: testData.reqMaxTime, - Matchers: []storepb.LabelMatcher{ - {Type: 
storepb.LabelMatcher_EQ, Name: "__name__", Value: "test"}, - }, - } + // Create a bucket and upload the block there. + bktDir := filepath.Join(tmpDir, "bucket") + bkt, err := filesystem.NewBucket(bktDir) + testutil.Ok(t, err) + defer func() { testutil.Ok(t, bkt.Close()) }() + + instrBkt := objstore.WithNoopInstr(bkt) + logger := log.NewNopLogger() + testutil.Ok(t, block.Upload(context.Background(), logger, bkt, filepath.Join(headOpts.ChunkDirRoot, blk.String()), metadata.NoneFunc, enableBirthstone)) + + // Instance a real bucket store we'll use to query the series. + baseBlockIDsFetcher := block.NewConcurrentLister(logger, instrBkt) + fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, baseBlockIDsFetcher, tmpDir, nil, nil) + testutil.Ok(tb, err) + + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{}) + testutil.Ok(tb, err) + + store, err := NewBucketStore( + instrBkt, + fetcher, + tmpDir, + NewChunksLimiterFactory(100000/MaxSamplesPerChunk), + NewSeriesLimiterFactory(0), + NewBytesLimiterFactory(0), + NewGapBasedPartitioner(PartitionerMaxGapSize), + 10, + false, + DefaultPostingOffsetInMemorySampling, + true, + false, + 0, + WithLogger(logger), + WithIndexCache(indexCache), + ) + testutil.Ok(tb, err) + testutil.Ok(tb, store.SyncBlocks(context.Background())) + + tests := map[string]struct { + reqMinTime int64 + reqMaxTime int64 + expectedSamples int + }{ + "query the entire block": { + reqMinTime: math.MinInt64, + reqMaxTime: math.MaxInt64, + expectedSamples: 10000, + }, + "query the beginning of the block": { + reqMinTime: 0, + reqMaxTime: 100, + expectedSamples: MaxSamplesPerChunk, + }, + "query the middle of the block": { + reqMinTime: 4000, + reqMaxTime: 4050, + expectedSamples: MaxSamplesPerChunk, + }, + "query the end of the block": { + reqMinTime: 9800, + reqMaxTime: 10000, + expectedSamples: (MaxSamplesPerChunk * 2) + (10000 % MaxSamplesPerChunk), + }, + } - srv := newStoreSeriesServer(context.Background()) - err = store.Series(req, srv) - testutil.Ok(t, err) - testutil.Assert(t, len(srv.SeriesSet) == 1) + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + req := &storepb.SeriesRequest{ + MinTime: testData.reqMinTime, + MaxTime: testData.reqMaxTime, + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_EQ, Name: "__name__", Value: "test"}, + }, + } - // Count the number of samples in the returned chunks. - numSamples := 0 - for _, rawChunk := range srv.SeriesSet[0].Chunks { - decodedChunk, err := chunkenc.FromData(chunkenc.EncXOR, rawChunk.Raw.Data) + srv := newStoreSeriesServer(context.Background()) + err = store.Series(req, srv) testutil.Ok(t, err) + testutil.Assert(t, len(srv.SeriesSet) == 1) - numSamples += decodedChunk.NumSamples() - } + // Count the number of samples in the returned chunks. 
+ numSamples := 0 + for _, rawChunk := range srv.SeriesSet[0].Chunks { + decodedChunk, err := chunkenc.FromData(chunkenc.EncXOR, rawChunk.Raw.Data) + testutil.Ok(t, err) - testutil.Assert(t, testData.expectedSamples == numSamples, "expected: %d, actual: %d", testData.expectedSamples, numSamples) - }) + numSamples += decodedChunk.NumSamples() + } + + testutil.Assert(t, testData.expectedSamples == numSamples, "expected: %d, actual: %d", testData.expectedSamples, numSamples) + }) + } } + + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } func TestSeries_SeriesSortedWithoutReplicaLabels(t *testing.T) { t.Parallel() - tests := map[string]struct { - series [][]labels.Labels - replicaLabels []string - expectedSeries []labels.Labels - }{ - "use TSDB label as replica label": { - series: [][]labels.Labels{ - { - labels.FromStrings("a", "1", "replica", "1", "z", "1"), - labels.FromStrings("a", "1", "replica", "1", "z", "2"), - labels.FromStrings("a", "1", "replica", "2", "z", "1"), - labels.FromStrings("a", "1", "replica", "2", "z", "2"), - labels.FromStrings("a", "2", "replica", "1", "z", "1"), - labels.FromStrings("a", "2", "replica", "2", "z", "1"), + runTest := func(t *testing.T, enableBirthstone bool) { + tests := map[string]struct { + series [][]labels.Labels + replicaLabels []string + expectedSeries []labels.Labels + }{ + "use TSDB label as replica label": { + series: [][]labels.Labels{ + { + labels.FromStrings("a", "1", "replica", "1", "z", "1"), + labels.FromStrings("a", "1", "replica", "1", "z", "2"), + labels.FromStrings("a", "1", "replica", "2", "z", "1"), + labels.FromStrings("a", "1", "replica", "2", "z", "2"), + labels.FromStrings("a", "2", "replica", "1", "z", "1"), + labels.FromStrings("a", "2", "replica", "2", "z", "1"), + }, + { + labels.FromStrings("a", "1", "replica", "3", "z", "1"), + labels.FromStrings("a", "1", "replica", "3", "z", "2"), + labels.FromStrings("a", "2", "replica", "3", "z", "1"), + }, }, - { - labels.FromStrings("a", "1", "replica", "3", "z", "1"), - labels.FromStrings("a", "1", "replica", "3", "z", "2"), - labels.FromStrings("a", "2", "replica", "3", "z", "1"), + replicaLabels: []string{"replica"}, + expectedSeries: []labels.Labels{ + labels.FromStrings("a", "1", "ext1", "0", "z", "1"), + labels.FromStrings("a", "1", "ext1", "0", "z", "2"), + labels.FromStrings("a", "1", "ext1", "1", "z", "1"), + labels.FromStrings("a", "1", "ext1", "1", "z", "2"), + labels.FromStrings("a", "2", "ext1", "0", "z", "1"), + labels.FromStrings("a", "2", "ext1", "1", "z", "1"), }, }, - replicaLabels: []string{"replica"}, - expectedSeries: []labels.Labels{ - labels.FromStrings("a", "1", "ext1", "0", "z", "1"), - labels.FromStrings("a", "1", "ext1", "0", "z", "2"), - labels.FromStrings("a", "1", "ext1", "1", "z", "1"), - labels.FromStrings("a", "1", "ext1", "1", "z", "2"), - labels.FromStrings("a", "2", "ext1", "0", "z", "1"), - labels.FromStrings("a", "2", "ext1", "1", "z", "1"), - }, - }, - "use external label as replica label": { - series: [][]labels.Labels{ - { + "use external label as replica label": { + series: [][]labels.Labels{ + { + labels.FromStrings("a", "1", "replica", "1", "z", "1"), + labels.FromStrings("a", "1", "replica", "1", "z", "2"), + labels.FromStrings("a", "1", "replica", "2", "z", "1"), + labels.FromStrings("a", "1", "replica", "2", "z", "2"), + }, + { + labels.FromStrings("a", "1", "replica", "1", "z", "1"), + labels.FromStrings("a", "1", "replica", "1", "z", 
"2"), + }, + }, + replicaLabels: []string{"ext1"}, + expectedSeries: []labels.Labels{ labels.FromStrings("a", "1", "replica", "1", "z", "1"), labels.FromStrings("a", "1", "replica", "1", "z", "2"), labels.FromStrings("a", "1", "replica", "2", "z", "1"), labels.FromStrings("a", "1", "replica", "2", "z", "2"), }, - { - labels.FromStrings("a", "1", "replica", "1", "z", "1"), - labels.FromStrings("a", "1", "replica", "1", "z", "2"), - }, }, - replicaLabels: []string{"ext1"}, - expectedSeries: []labels.Labels{ - labels.FromStrings("a", "1", "replica", "1", "z", "1"), - labels.FromStrings("a", "1", "replica", "1", "z", "2"), - labels.FromStrings("a", "1", "replica", "2", "z", "1"), - labels.FromStrings("a", "1", "replica", "2", "z", "2"), - }, - }, - } - - for testName, testData := range tests { - t.Run(testName, func(t *testing.T) { - tb := testutil.NewTB(t) - - tmpDir := t.TempDir() + } - bktDir := filepath.Join(tmpDir, "bucket") - bkt, err := filesystem.NewBucket(bktDir) - testutil.Ok(t, err) - defer testutil.Ok(t, bkt.Close()) + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + tb := testutil.NewTB(t) - instrBkt := objstore.WithNoopInstr(bkt) - logger := log.NewNopLogger() + tmpDir := t.TempDir() - for i, series := range testData.series { - replicaVal := strconv.Itoa(i) - head := uploadSeriesToBucket(t, bkt, replicaVal, filepath.Join(tmpDir, replicaVal), series) - defer testutil.Ok(t, head.Close()) - } + bktDir := filepath.Join(tmpDir, "bucket") + bkt, err := filesystem.NewBucket(bktDir) + testutil.Ok(t, err) + defer testutil.Ok(t, bkt.Close()) - // Instance a real bucket store we'll use to query the series. - baseBlockIDsFetcher := block.NewConcurrentLister(logger, instrBkt) - fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, baseBlockIDsFetcher, tmpDir, nil, nil) - testutil.Ok(tb, err) + instrBkt := objstore.WithNoopInstr(bkt) + logger := log.NewNopLogger() - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{}) - testutil.Ok(tb, err) + for i, series := range testData.series { + replicaVal := strconv.Itoa(i) + head := uploadSeriesToBucket(t, bkt, replicaVal, filepath.Join(tmpDir, replicaVal), series, enableBirthstone) + defer testutil.Ok(t, head.Close()) + } - store, err := NewBucketStore( - instrBkt, - fetcher, - tmpDir, - NewChunksLimiterFactory(100000/MaxSamplesPerChunk), - NewSeriesLimiterFactory(0), - NewBytesLimiterFactory(0), - NewGapBasedPartitioner(PartitionerMaxGapSize), - 10, - false, - DefaultPostingOffsetInMemorySampling, - true, - false, - 0, - WithLogger(logger), - WithIndexCache(indexCache), - ) - testutil.Ok(tb, err) - testutil.Ok(tb, store.SyncBlocks(context.Background())) + // Instance a real bucket store we'll use to query the series. 
+ baseBlockIDsFetcher := block.NewConcurrentLister(logger, instrBkt) + fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, baseBlockIDsFetcher, tmpDir, nil, nil) + testutil.Ok(tb, err) + + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{}) + testutil.Ok(tb, err) + + store, err := NewBucketStore( + instrBkt, + fetcher, + tmpDir, + NewChunksLimiterFactory(100000/MaxSamplesPerChunk), + NewSeriesLimiterFactory(0), + NewBytesLimiterFactory(0), + NewGapBasedPartitioner(PartitionerMaxGapSize), + 10, + false, + DefaultPostingOffsetInMemorySampling, + true, + false, + 0, + WithLogger(logger), + WithIndexCache(indexCache), + ) + testutil.Ok(tb, err) + testutil.Ok(tb, store.SyncBlocks(context.Background())) - req := &storepb.SeriesRequest{ - MinTime: math.MinInt, - MaxTime: math.MaxInt64, - Matchers: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_RE, Name: "a", Value: ".+"}, - }, - WithoutReplicaLabels: testData.replicaLabels, - } + req := &storepb.SeriesRequest{ + MinTime: math.MinInt, + MaxTime: math.MaxInt64, + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_RE, Name: "a", Value: ".+"}, + }, + WithoutReplicaLabels: testData.replicaLabels, + } - srv := newStoreSeriesServer(context.Background()) - err = store.Series(req, srv) - testutil.Ok(t, err) - testutil.Assert(t, len(srv.SeriesSet) == len(testData.expectedSeries)) + srv := newStoreSeriesServer(context.Background()) + err = store.Series(req, srv) + testutil.Ok(t, err) + testutil.Assert(t, len(srv.SeriesSet) == len(testData.expectedSeries)) - var response []labels.Labels - for _, respSeries := range srv.SeriesSet { - promLabels := labelpb.ZLabelsToPromLabels(respSeries.Labels) - response = append(response, promLabels) - } + var response []labels.Labels + for _, respSeries := range srv.SeriesSet { + promLabels := labelpb.ZLabelsToPromLabels(respSeries.Labels) + response = append(response, promLabels) + } - testutil.Equals(t, testData.expectedSeries, response) - }) + testutil.Equals(t, testData.expectedSeries, response) + }) + } } + + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } -func uploadSeriesToBucket(t *testing.T, bkt *filesystem.Bucket, replica string, path string, series []labels.Labels) *tsdb.Head { +func uploadSeriesToBucket(t *testing.T, bkt *filesystem.Bucket, replica string, path string, series []labels.Labels, enableBirthstone bool) *tsdb.Head { headOpts := tsdb.DefaultHeadOptions() headOpts.ChunkDirRoot = filepath.Join(path, "block") @@ -2310,7 +2373,7 @@ func uploadSeriesToBucket(t *testing.T, bkt *filesystem.Bucket, replica string, _, err = metadata.InjectThanos(log.NewNopLogger(), filepath.Join(headOpts.ChunkDirRoot, blk.String()), thanosMeta, nil) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(context.Background(), log.NewNopLogger(), bkt, filepath.Join(headOpts.ChunkDirRoot, blk.String()), metadata.NoneFunc)) + testutil.Ok(t, block.Upload(context.Background(), log.NewNopLogger(), bkt, filepath.Join(headOpts.ChunkDirRoot, blk.String()), metadata.NoneFunc, enableBirthstone)) testutil.Ok(t, err) return h @@ -2598,116 +2661,125 @@ func TestLabelNamesAndValuesHints(t *testing.T) { func TestSeries_ChunksHaveHashRepresentation(t *testing.T) { t.Parallel() - tb := testutil.NewTB(t) - - tmpDir := t.TempDir() + runTest := func(t *testing.T, enableBirthstone bool) { + tb := testutil.NewTB(t) - headOpts := tsdb.DefaultHeadOptions() - 
headOpts.ChunkDirRoot = filepath.Join(tmpDir, "block") + tmpDir := t.TempDir() - h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, h.Close()) }() + headOpts := tsdb.DefaultHeadOptions() + headOpts.ChunkDirRoot = filepath.Join(tmpDir, "block") - series := labels.FromStrings("__name__", "test") - app := h.Appender(context.Background()) - for ts := int64(0); ts < 10_000; ts++ { - _, err := app.Append(0, series, ts, float64(ts)) + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) testutil.Ok(t, err) - } - testutil.Ok(t, app.Commit()) - - blk := storetestutil.CreateBlockFromHead(t, headOpts.ChunkDirRoot, h) - - thanosMeta := metadata.Thanos{ - Labels: labels.FromStrings("ext1", "1").Map(), - Downsample: metadata.ThanosDownsample{Resolution: 0}, - Source: metadata.TestSource, - } - - _, err = metadata.InjectThanos(log.NewNopLogger(), filepath.Join(headOpts.ChunkDirRoot, blk.String()), thanosMeta, nil) - testutil.Ok(t, err) - - // Create a bucket and upload the block there. - bktDir := filepath.Join(tmpDir, "bucket") - bkt, err := filesystem.NewBucket(bktDir) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, bkt.Close()) }() - - instrBkt := objstore.WithNoopInstr(bkt) - logger := log.NewNopLogger() - testutil.Ok(t, block.Upload(context.Background(), logger, bkt, filepath.Join(headOpts.ChunkDirRoot, blk.String()), metadata.NoneFunc)) - - // Instance a real bucket store we'll use to query the series. - baseBlockIDsFetcher := block.NewConcurrentLister(logger, instrBkt) - fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, baseBlockIDsFetcher, tmpDir, nil, nil) - testutil.Ok(tb, err) + defer func() { testutil.Ok(t, h.Close()) }() - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{}) - testutil.Ok(tb, err) + series := labels.FromStrings("__name__", "test") + app := h.Appender(context.Background()) + for ts := int64(0); ts < 10_000; ts++ { + _, err := app.Append(0, series, ts, float64(ts)) + testutil.Ok(t, err) + } + testutil.Ok(t, app.Commit()) - store, err := NewBucketStore( - instrBkt, - fetcher, - tmpDir, - NewChunksLimiterFactory(100000/MaxSamplesPerChunk), - NewSeriesLimiterFactory(0), - NewBytesLimiterFactory(0), - NewGapBasedPartitioner(PartitionerMaxGapSize), - 10, - false, - DefaultPostingOffsetInMemorySampling, - true, - false, - 0, - WithLogger(logger), - WithIndexCache(indexCache), - ) - testutil.Ok(tb, err) - testutil.Ok(tb, store.SyncBlocks(context.Background())) + blk := storetestutil.CreateBlockFromHead(t, headOpts.ChunkDirRoot, h) - reqMinTime := math.MinInt64 - reqMaxTime := math.MaxInt64 + thanosMeta := metadata.Thanos{ + Labels: labels.FromStrings("ext1", "1").Map(), + Downsample: metadata.ThanosDownsample{Resolution: 0}, + Source: metadata.TestSource, + } - testCases := []struct { - name string - calculateChecksum bool - }{ - { - name: "calculate checksum", - calculateChecksum: true, - }, - } + _, err = metadata.InjectThanos(log.NewNopLogger(), filepath.Join(headOpts.ChunkDirRoot, blk.String()), thanosMeta, nil) + testutil.Ok(t, err) - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - req := &storepb.SeriesRequest{ - MinTime: int64(reqMinTime), - MaxTime: int64(reqMaxTime), - Matchers: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_EQ, Name: "__name__", Value: "test"}, - }, - } + // Create a bucket and upload the block there. 
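+ // The upload below passes the enableBirthstone flag under test.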
+ bktDir := filepath.Join(tmpDir, "bucket") + bkt, err := filesystem.NewBucket(bktDir) + testutil.Ok(t, err) + defer func() { testutil.Ok(t, bkt.Close()) }() + + instrBkt := objstore.WithNoopInstr(bkt) + logger := log.NewNopLogger() + testutil.Ok(t, block.Upload(context.Background(), logger, bkt, filepath.Join(headOpts.ChunkDirRoot, blk.String()), metadata.NoneFunc, enableBirthstone)) + + // Instance a real bucket store we'll use to query the series. + baseBlockIDsFetcher := block.NewConcurrentLister(logger, instrBkt) + fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, baseBlockIDsFetcher, tmpDir, nil, nil) + testutil.Ok(tb, err) + + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{}) + testutil.Ok(tb, err) + + store, err := NewBucketStore( + instrBkt, + fetcher, + tmpDir, + NewChunksLimiterFactory(100000/MaxSamplesPerChunk), + NewSeriesLimiterFactory(0), + NewBytesLimiterFactory(0), + NewGapBasedPartitioner(PartitionerMaxGapSize), + 10, + false, + DefaultPostingOffsetInMemorySampling, + true, + false, + 0, + WithLogger(logger), + WithIndexCache(indexCache), + ) + testutil.Ok(tb, err) + testutil.Ok(tb, store.SyncBlocks(context.Background())) + + reqMinTime := math.MinInt64 + reqMaxTime := math.MaxInt64 + + testCases := []struct { + name string + calculateChecksum bool + }{ + { + name: "calculate checksum", + calculateChecksum: true, + }, + } - srv := newStoreSeriesServer(context.Background()) - err = store.Series(req, srv) - testutil.Ok(t, err) - testutil.Assert(t, len(srv.SeriesSet) == 1) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + req := &storepb.SeriesRequest{ + MinTime: int64(reqMinTime), + MaxTime: int64(reqMaxTime), + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_EQ, Name: "__name__", Value: "test"}, + }, + } - for _, rawChunk := range srv.SeriesSet[0].Chunks { - hash := rawChunk.Raw.Hash - decodedChunk, err := chunkenc.FromData(chunkenc.EncXOR, rawChunk.Raw.Data) + srv := newStoreSeriesServer(context.Background()) + err = store.Series(req, srv) testutil.Ok(t, err) - - if tc.calculateChecksum { - expectedHash := xxhash.Sum64(decodedChunk.Bytes()) - testutil.Equals(t, expectedHash, hash) - } else { - testutil.Equals(t, uint64(0), hash) + testutil.Assert(t, len(srv.SeriesSet) == 1) + + for _, rawChunk := range srv.SeriesSet[0].Chunks { + hash := rawChunk.Raw.Hash + decodedChunk, err := chunkenc.FromData(chunkenc.EncXOR, rawChunk.Raw.Data) + testutil.Ok(t, err) + + if tc.calculateChecksum { + expectedHash := xxhash.Sum64(decodedChunk.Bytes()) + testutil.Equals(t, expectedHash, hash) + } else { + testutil.Equals(t, uint64(0), hash) + } } - } - }) + }) + } } + + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } func labelNamesFromSeriesSet(series []*storepb.Series) []string { @@ -2758,7 +2830,7 @@ func BenchmarkBucketBlock_readChunkRange(b *testing.B) { blockMeta, err := metadata.InjectThanos(logger, filepath.Join(tmpDir, blockID.String()), thanosMeta, nil) testutil.Ok(b, err) - testutil.Ok(b, block.Upload(context.Background(), logger, bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + testutil.Ok(b, block.Upload(context.Background(), logger, bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc, false)) // Create a chunk pool with buckets between 8B and 32KB. 
chunkPool, err := pool.NewBucketedPool[byte](8, 32*1024, 2, 1e10) @@ -2828,7 +2900,7 @@ func prepareBucket(b *testing.B, resolutionLevel compact.ResolutionLevel) (*buck blockMeta, err := metadata.InjectThanos(logger, filepath.Join(tmpDir, blockID.String()), thanosMeta, nil) testutil.Ok(b, err) - testutil.Ok(b, block.Upload(context.Background(), logger, bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + testutil.Ok(b, block.Upload(context.Background(), logger, bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc, false)) if resolutionLevel > 0 { // Downsample newly-created block. @@ -2837,7 +2909,7 @@ func prepareBucket(b *testing.B, resolutionLevel compact.ResolutionLevel) (*buck blockMeta, err = metadata.ReadFromDir(filepath.Join(tmpDir, blockID.String())) testutil.Ok(b, err) - testutil.Ok(b, block.Upload(context.Background(), logger, bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + testutil.Ok(b, block.Upload(context.Background(), logger, bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc, false)) } testutil.Ok(b, head.Close()) @@ -3478,116 +3550,125 @@ func TestPostingGroupMerge(t *testing.T) { func TestExpandedPostingsRace(t *testing.T) { t.Parallel() - const blockCount = 10 - - tmpDir := t.TempDir() - t.Cleanup(func() { - testutil.Ok(t, os.RemoveAll(tmpDir)) - }) + runTest := func(t *testing.T, enableBirthstone bool) { + const blockCount = 10 - bkt := objstore.NewInMemBucket() - t.Cleanup(func() { - testutil.Ok(t, bkt.Close()) - }) + tmpDir := t.TempDir() + t.Cleanup(func() { + testutil.Ok(t, os.RemoveAll(tmpDir)) + }) - logger := log.NewNopLogger() - // Create a block. - head, _ := storetestutil.CreateHeadWithSeries(t, 0, storetestutil.HeadGenOptions{ - TSDBDir: filepath.Join(tmpDir, "head"), - SamplesPerSeries: 10, - ScrapeInterval: 15 * time.Second, - Series: 1000, - PrependLabels: labels.EmptyLabels(), - Random: rand.New(rand.NewSource(120)), - SkipChunks: true, - }) - blockID := storetestutil.CreateBlockFromHead(t, tmpDir, head) + bkt := objstore.NewInMemBucket() + t.Cleanup(func() { + testutil.Ok(t, bkt.Close()) + }) - bucketBlocks := make([]*bucketBlock, 0, blockCount) + logger := log.NewNopLogger() + // Create a block. + head, _ := storetestutil.CreateHeadWithSeries(t, 0, storetestutil.HeadGenOptions{ + TSDBDir: filepath.Join(tmpDir, "head"), + SamplesPerSeries: 10, + ScrapeInterval: 15 * time.Second, + Series: 1000, + PrependLabels: labels.EmptyLabels(), + Random: rand.New(rand.NewSource(120)), + SkipChunks: true, + }) + blockID := storetestutil.CreateBlockFromHead(t, tmpDir, head) - for i := 0; i < blockCount; i++ { - ul := ulid.MustNew(uint64(i), rand.New(rand.NewSource(444))) + bucketBlocks := make([]*bucketBlock, 0, blockCount) - // Upload the block to the bucket. - thanosMeta := metadata.Thanos{ - Labels: labels.FromStrings("ext1", fmt.Sprintf("%d", i)).Map(), - Downsample: metadata.ThanosDownsample{Resolution: 0}, - Source: metadata.TestSource, - } - m, err := metadata.ReadFromDir(filepath.Join(tmpDir, blockID.String())) - testutil.Ok(t, err) + for i := 0; i < blockCount; i++ { + ul := ulid.MustNew(uint64(i), rand.New(rand.NewSource(444))) - m.Thanos = thanosMeta - m.BlockMeta.ULID = ul + // Upload the block to the bucket. 
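+ // Each of the blockCount copies gets its own ULID, so ten distinct blocks are uploaded, with or without birthstone files.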
+ thanosMeta := metadata.Thanos{ + Labels: labels.FromStrings("ext1", fmt.Sprintf("%d", i)).Map(), + Downsample: metadata.ThanosDownsample{Resolution: 0}, + Source: metadata.TestSource, + } + m, err := metadata.ReadFromDir(filepath.Join(tmpDir, blockID.String())) + testutil.Ok(t, err) - e2eutil.Copy(t, filepath.Join(tmpDir, blockID.String()), filepath.Join(tmpDir, ul.String())) - testutil.Ok(t, m.WriteToDir(log.NewLogfmtLogger(os.Stderr), filepath.Join(tmpDir, ul.String()))) - testutil.Ok(t, err) - testutil.Ok(t, block.Upload(context.Background(), log.NewLogfmtLogger(os.Stderr), bkt, filepath.Join(tmpDir, ul.String()), metadata.NoneFunc)) + m.Thanos = thanosMeta + m.BlockMeta.ULID = ul - r, err := indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, ul, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil)) - testutil.Ok(t, err) + e2eutil.Copy(t, filepath.Join(tmpDir, blockID.String()), filepath.Join(tmpDir, ul.String())) + testutil.Ok(t, m.WriteToDir(log.NewLogfmtLogger(os.Stderr), filepath.Join(tmpDir, ul.String()))) + testutil.Ok(t, err) + testutil.Ok(t, block.Upload(context.Background(), log.NewLogfmtLogger(os.Stderr), bkt, filepath.Join(tmpDir, ul.String()), metadata.NoneFunc, enableBirthstone)) - blk, err := newBucketBlock( - context.Background(), - newBucketStoreMetrics(nil), - m, - bkt, - filepath.Join(tmpDir, ul.String()), - noopCache{}, - nil, - r, - NewGapBasedPartitioner(PartitionerMaxGapSize), - nil, - nil, - ) - testutil.Ok(t, err) + r, err := indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, ul, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil)) + testutil.Ok(t, err) - bucketBlocks = append(bucketBlocks, blk) - } + blk, err := newBucketBlock( + context.Background(), + newBucketStoreMetrics(nil), + m, + bkt, + filepath.Join(tmpDir, ul.String()), + noopCache{}, + nil, + r, + NewGapBasedPartitioner(PartitionerMaxGapSize), + nil, + nil, + ) + testutil.Ok(t, err) - tm, cancel := context.WithTimeout(context.Background(), 10*time.Second) - t.Cleanup(cancel) + bucketBlocks = append(bucketBlocks, blk) + } - l := sync.Mutex{} - previousRefs := make(map[int][]storage.SeriesRef) - dummyCounter := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"}) + tm, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) - for { - if tm.Err() != nil { - break - } + l := sync.Mutex{} + previousRefs := make(map[int][]storage.SeriesRef) + dummyCounter := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"}) - m := []*labels.Matcher{ - labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), - labels.MustNewMatcher(labels.MatchRegexp, "j", ".+"), - labels.MustNewMatcher(labels.MatchRegexp, "i", ".+"), - labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), - labels.MustNewMatcher(labels.MatchRegexp, "j", ".+"), - labels.MustNewMatcher(labels.MatchRegexp, "i", ".+"), - labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), - } + for { + if tm.Err() != nil { + break + } - wg := &sync.WaitGroup{} - for i, bb := range bucketBlocks { - wg.Add(1) + m := []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), + labels.MustNewMatcher(labels.MatchRegexp, "j", ".+"), + labels.MustNewMatcher(labels.MatchRegexp, "i", ".+"), + labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), + labels.MustNewMatcher(labels.MatchRegexp, "j", ".+"), + 
labels.MustNewMatcher(labels.MatchRegexp, "i", ".+"), + labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), + } - go func(i int, bb *bucketBlock) { - refs, err := bb.indexReader(logger).ExpandedPostings(context.Background(), m, NewBytesLimiterFactory(0)(nil), false, dummyCounter, tenancy.DefaultTenant) - testutil.Ok(t, err) - defer wg.Done() - - l.Lock() - defer l.Unlock() - if previousRefs[i] != nil { - testutil.Equals(t, previousRefs[i], refs.postings) - } else { - previousRefs[i] = refs.postings - } - }(i, bb) + wg := &sync.WaitGroup{} + for i, bb := range bucketBlocks { + wg.Add(1) + + go func(i int, bb *bucketBlock) { + refs, err := bb.indexReader(logger).ExpandedPostings(context.Background(), m, NewBytesLimiterFactory(0)(nil), false, dummyCounter, tenancy.DefaultTenant) + testutil.Ok(t, err) + defer wg.Done() + + l.Lock() + defer l.Unlock() + if previousRefs[i] != nil { + testutil.Equals(t, previousRefs[i], refs.postings) + } else { + previousRefs[i] = refs.postings + } + }(i, bb) + } + wg.Wait() } - wg.Wait() } + + t.Run("enableBirthstone", func(t *testing.T) { + runTest(t, true) + }) + t.Run("disableBirthstone", func(t *testing.T) { + runTest(t, false) + }) } func TestBucketIndexReader_decodeCachedPostingsErrors(t *testing.T) { @@ -3607,99 +3688,108 @@ func TestBucketIndexReader_decodeCachedPostingsErrors(t *testing.T) { func TestBucketStoreDedupOnBlockSeriesSet(t *testing.T) { t.Parallel() - logger := log.NewNopLogger() - tmpDir := t.TempDir() - bktDir := filepath.Join(tmpDir, "bkt") - auxDir := filepath.Join(tmpDir, "aux") - metaDir := filepath.Join(tmpDir, "meta") - extLset := labels.FromStrings("region", "eu-west") - - testutil.Ok(t, os.MkdirAll(metaDir, os.ModePerm)) - testutil.Ok(t, os.MkdirAll(auxDir, os.ModePerm)) + runTest := func(t *testing.T, enableBirthstone bool) { + logger := log.NewNopLogger() + tmpDir := t.TempDir() + bktDir := filepath.Join(tmpDir, "bkt") + auxDir := filepath.Join(tmpDir, "aux") + metaDir := filepath.Join(tmpDir, "meta") + extLset := labels.FromStrings("region", "eu-west") - bkt, err := filesystem.NewBucket(bktDir) - testutil.Ok(t, err) - t.Cleanup(func() { testutil.Ok(t, bkt.Close()) }) + testutil.Ok(t, os.MkdirAll(metaDir, os.ModePerm)) + testutil.Ok(t, os.MkdirAll(auxDir, os.ModePerm)) - for i := 0; i < 2; i++ { - headOpts := tsdb.DefaultHeadOptions() - headOpts.ChunkDirRoot = tmpDir - headOpts.ChunkRange = 1000 - h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) + bkt, err := filesystem.NewBucket(bktDir) testutil.Ok(t, err) - t.Cleanup(func() { testutil.Ok(t, h.Close()) }) + t.Cleanup(func() { testutil.Ok(t, bkt.Close()) }) - app := h.Appender(context.Background()) - _, err = app.Append(0, labels.FromStrings("replica", "a", "z", "1"), 0, 1) - testutil.Ok(t, err) - _, err = app.Append(0, labels.FromStrings("replica", "a", "z", "2"), 0, 1) - testutil.Ok(t, err) - _, err = app.Append(0, labels.FromStrings("replica", "b", "z", "1"), 0, 1) - testutil.Ok(t, err) - _, err = app.Append(0, labels.FromStrings("replica", "b", "z", "2"), 0, 1) - testutil.Ok(t, err) - testutil.Ok(t, app.Commit()) + for i := 0; i < 2; i++ { + headOpts := tsdb.DefaultHeadOptions() + headOpts.ChunkDirRoot = tmpDir + headOpts.ChunkRange = 1000 + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) + testutil.Ok(t, err) + t.Cleanup(func() { testutil.Ok(t, h.Close()) }) - id := storetestutil.CreateBlockFromHead(t, auxDir, h) + app := h.Appender(context.Background()) + _, err = app.Append(0, labels.FromStrings("replica", "a", "z", "1"), 0, 1) + testutil.Ok(t, err) 
@@ -3607,99 +3688,108 @@ func TestBucketStoreDedupOnBlockSeriesSet(t *testing.T) {
 	t.Parallel()

-	logger := log.NewNopLogger()
-	tmpDir := t.TempDir()
-	bktDir := filepath.Join(tmpDir, "bkt")
-	auxDir := filepath.Join(tmpDir, "aux")
-	metaDir := filepath.Join(tmpDir, "meta")
-	extLset := labels.FromStrings("region", "eu-west")
-
-	testutil.Ok(t, os.MkdirAll(metaDir, os.ModePerm))
-	testutil.Ok(t, os.MkdirAll(auxDir, os.ModePerm))
+	runTest := func(t *testing.T, enableBirthstone bool) {
+		logger := log.NewNopLogger()
+		tmpDir := t.TempDir()
+		bktDir := filepath.Join(tmpDir, "bkt")
+		auxDir := filepath.Join(tmpDir, "aux")
+		metaDir := filepath.Join(tmpDir, "meta")
+		extLset := labels.FromStrings("region", "eu-west")

-	bkt, err := filesystem.NewBucket(bktDir)
-	testutil.Ok(t, err)
-	t.Cleanup(func() { testutil.Ok(t, bkt.Close()) })
+		testutil.Ok(t, os.MkdirAll(metaDir, os.ModePerm))
+		testutil.Ok(t, os.MkdirAll(auxDir, os.ModePerm))

-	for i := 0; i < 2; i++ {
-		headOpts := tsdb.DefaultHeadOptions()
-		headOpts.ChunkDirRoot = tmpDir
-		headOpts.ChunkRange = 1000
-		h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil)
+		bkt, err := filesystem.NewBucket(bktDir)
 		testutil.Ok(t, err)
-		t.Cleanup(func() { testutil.Ok(t, h.Close()) })
+		t.Cleanup(func() { testutil.Ok(t, bkt.Close()) })

-		app := h.Appender(context.Background())
-		_, err = app.Append(0, labels.FromStrings("replica", "a", "z", "1"), 0, 1)
-		testutil.Ok(t, err)
-		_, err = app.Append(0, labels.FromStrings("replica", "a", "z", "2"), 0, 1)
-		testutil.Ok(t, err)
-		_, err = app.Append(0, labels.FromStrings("replica", "b", "z", "1"), 0, 1)
-		testutil.Ok(t, err)
-		_, err = app.Append(0, labels.FromStrings("replica", "b", "z", "2"), 0, 1)
-		testutil.Ok(t, err)
-		testutil.Ok(t, app.Commit())
+		for i := 0; i < 2; i++ {
+			headOpts := tsdb.DefaultHeadOptions()
+			headOpts.ChunkDirRoot = tmpDir
+			headOpts.ChunkRange = 1000
+			h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil)
+			testutil.Ok(t, err)
+			t.Cleanup(func() { testutil.Ok(t, h.Close()) })

-		id := storetestutil.CreateBlockFromHead(t, auxDir, h)
+			app := h.Appender(context.Background())
+			_, err = app.Append(0, labels.FromStrings("replica", "a", "z", "1"), 0, 1)
+			testutil.Ok(t, err)
+			_, err = app.Append(0, labels.FromStrings("replica", "a", "z", "2"), 0, 1)
+			testutil.Ok(t, err)
+			_, err = app.Append(0, labels.FromStrings("replica", "b", "z", "1"), 0, 1)
+			testutil.Ok(t, err)
+			_, err = app.Append(0, labels.FromStrings("replica", "b", "z", "2"), 0, 1)
+			testutil.Ok(t, err)
+			testutil.Ok(t, app.Commit())

-		auxBlockDir := filepath.Join(auxDir, id.String())
-		_, err = metadata.InjectThanos(log.NewNopLogger(), auxBlockDir, metadata.Thanos{
-			Labels:     extLset.Map(),
-			Downsample: metadata.ThanosDownsample{Resolution: 0},
-			Source:     metadata.TestSource,
-		}, nil)
-		testutil.Ok(t, err)
+			id := storetestutil.CreateBlockFromHead(t, auxDir, h)

-		testutil.Ok(t, block.Upload(context.Background(), logger, bkt, auxBlockDir, metadata.NoneFunc))
-		testutil.Ok(t, block.Upload(context.Background(), logger, bkt, auxBlockDir, metadata.NoneFunc))
-	}
+			auxBlockDir := filepath.Join(auxDir, id.String())
+			_, err = metadata.InjectThanos(log.NewNopLogger(), auxBlockDir, metadata.Thanos{
+				Labels:     extLset.Map(),
+				Downsample: metadata.ThanosDownsample{Resolution: 0},
+				Source:     metadata.TestSource,
+			}, nil)
+			testutil.Ok(t, err)

-	chunkPool, err := NewDefaultChunkBytesPool(2e5)
-	testutil.Ok(t, err)
+			testutil.Ok(t, block.Upload(context.Background(), logger, bkt, auxBlockDir, metadata.NoneFunc, enableBirthstone))
+			testutil.Ok(t, block.Upload(context.Background(), logger, bkt, auxBlockDir, metadata.NoneFunc, enableBirthstone))
+		}

-	insBkt := objstore.WithNoopInstr(bkt)
-	baseBlockIDsFetcher := block.NewConcurrentLister(logger, insBkt)
-	metaFetcher, err := block.NewMetaFetcher(logger, 20, insBkt, baseBlockIDsFetcher, metaDir, nil, []block.MetadataFilter{
-		block.NewTimePartitionMetaFilter(allowAllFilterConf.MinTime, allowAllFilterConf.MaxTime),
-	})
-	testutil.Ok(t, err)
+		chunkPool, err := NewDefaultChunkBytesPool(2e5)
+		testutil.Ok(t, err)

-	bucketStore, err := NewBucketStore(
-		objstore.WithNoopInstr(bkt),
-		metaFetcher,
-		"",
-		NewChunksLimiterFactory(10e6),
-		NewSeriesLimiterFactory(10e6),
-		NewBytesLimiterFactory(10e6),
-		NewGapBasedPartitioner(PartitionerMaxGapSize),
-		20,
-		true,
-		DefaultPostingOffsetInMemorySampling,
-		false,
-		false,
-		1*time.Minute,
-		WithChunkPool(chunkPool),
-		WithFilterConfig(allowAllFilterConf),
-	)
-	testutil.Ok(t, err)
-	t.Cleanup(func() { testutil.Ok(t, bucketStore.Close()) })
+		insBkt := objstore.WithNoopInstr(bkt)
+		baseBlockIDsFetcher := block.NewConcurrentLister(logger, insBkt)
+		metaFetcher, err := block.NewMetaFetcher(logger, 20, insBkt, baseBlockIDsFetcher, metaDir, nil, []block.MetadataFilter{
+			block.NewTimePartitionMetaFilter(allowAllFilterConf.MinTime, allowAllFilterConf.MaxTime),
+		})
+		testutil.Ok(t, err)
+
+		bucketStore, err := NewBucketStore(
+			objstore.WithNoopInstr(bkt),
+			metaFetcher,
+			"",
+			NewChunksLimiterFactory(10e6),
+			NewSeriesLimiterFactory(10e6),
+			NewBytesLimiterFactory(10e6),
+			NewGapBasedPartitioner(PartitionerMaxGapSize),
+			20,
+			true,
+			DefaultPostingOffsetInMemorySampling,
+			false,
+			false,
+			1*time.Minute,
+			WithChunkPool(chunkPool),
+			WithFilterConfig(allowAllFilterConf),
+		)
+		testutil.Ok(t, err)
+		t.Cleanup(func() { testutil.Ok(t, bucketStore.Close()) })

-	testutil.Ok(t, bucketStore.SyncBlocks(context.Background()))
+		testutil.Ok(t, bucketStore.SyncBlocks(context.Background()))

-	srv := newStoreSeriesServer(context.Background())
-	testutil.Ok(t, bucketStore.Series(&storepb.SeriesRequest{
-		WithoutReplicaLabels: []string{"replica"},
-		MinTime:              timestamp.FromTime(minTime),
-		MaxTime:              timestamp.FromTime(maxTime),
-		Matchers: []storepb.LabelMatcher{
-			{Type: storepb.LabelMatcher_NEQ, Name: "z", Value: ""},
-		},
-	}, srv))
+		srv := newStoreSeriesServer(context.Background())
+		testutil.Ok(t, bucketStore.Series(&storepb.SeriesRequest{
+			WithoutReplicaLabels: []string{"replica"},
+			MinTime:              timestamp.FromTime(minTime),
+			MaxTime:              timestamp.FromTime(maxTime),
+			Matchers: []storepb.LabelMatcher{
+				{Type: storepb.LabelMatcher_NEQ, Name: "z", Value: ""},
+			},
+		}, srv))
+
+		testutil.Equals(t, true, slices.IsSortedFunc(srv.SeriesSet, func(x, y storepb.Series) int {
+			return labels.Compare(x.PromLabels(), y.PromLabels())
+		}))
+		testutil.Equals(t, 2, len(srv.SeriesSet))
+	}

-	testutil.Equals(t, true, slices.IsSortedFunc(srv.SeriesSet, func(x, y storepb.Series) int {
-		return labels.Compare(x.PromLabels(), y.PromLabels())
-	}))
-	testutil.Equals(t, 2, len(srv.SeriesSet))
+
+	t.Run("enableBirthstone", func(t *testing.T) {
+		runTest(t, true)
+	})
+	t.Run("disableBirthstone", func(t *testing.T) {
+		runTest(t, false)
+	})
 }

 func TestQueryStatsMerge(t *testing.T) {
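The dedup test above uploads the same block twice with the flag threaded through, so block.Upload must remain idempotent in both modes. This patch excerpt does not include pkg/block itself; if the implementation follows the flag's description (a marker denoting a complete block in the bucket), the ordering would plausibly look like this sketch, where the package name, uploadBlockFiles helper, and "birthstone" object name are all assumptions rather than code from this change:

package blocksketch

import (
	"context"
	"path"
	"strings"

	"github.com/thanos-io/objstore"
)

// uploadWithBirthstone is a hypothetical sketch of marker-last upload ordering.
func uploadWithBirthstone(ctx context.Context, bkt objstore.Bucket, bdir, id string, enableBirthstone bool) error {
	// Upload chunks, index, and meta.json first so a reader that trusts
	// the marker can never observe a half-written block. (assumed helper)
	if err := uploadBlockFiles(ctx, bkt, bdir, id); err != nil {
		return err
	}
	if !enableBirthstone {
		return nil
	}
	// An empty object whose presence alone signals completeness;
	// the object name is an assumption.
	return bkt.Upload(ctx, path.Join(id, "birthstone"), strings.NewReader(""))
}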
@@ -3827,124 +3917,133 @@ func TestBucketStoreStreamingSeriesLimit(t *testing.T) {
 	t.Parallel()

-	logger := log.NewNopLogger()
-	tmpDir := t.TempDir()
-	bktDir := filepath.Join(tmpDir, "bkt")
-	auxDir := filepath.Join(tmpDir, "aux")
-	metaDir := filepath.Join(tmpDir, "meta")
-	extLset := labels.FromStrings("region", "eu-west")
+	runTest := func(t *testing.T, enableBirthstone bool) {
+		logger := log.NewNopLogger()
+		tmpDir := t.TempDir()
+		bktDir := filepath.Join(tmpDir, "bkt")
+		auxDir := filepath.Join(tmpDir, "aux")
+		metaDir := filepath.Join(tmpDir, "meta")
+		extLset := labels.FromStrings("region", "eu-west")

-	testutil.Ok(t, os.MkdirAll(metaDir, os.ModePerm))
-	testutil.Ok(t, os.MkdirAll(auxDir, os.ModePerm))
+		testutil.Ok(t, os.MkdirAll(metaDir, os.ModePerm))
+		testutil.Ok(t, os.MkdirAll(auxDir, os.ModePerm))

-	bkt, err := filesystem.NewBucket(bktDir)
-	testutil.Ok(t, err)
-	t.Cleanup(func() { testutil.Ok(t, bkt.Close()) })
+		bkt, err := filesystem.NewBucket(bktDir)
+		testutil.Ok(t, err)
+		t.Cleanup(func() { testutil.Ok(t, bkt.Close()) })

-	headOpts := tsdb.DefaultHeadOptions()
-	headOpts.ChunkDirRoot = tmpDir
-	headOpts.ChunkRange = 1000
-	h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil)
-	testutil.Ok(t, err)
-	t.Cleanup(func() { testutil.Ok(t, h.Close()) })
+		headOpts := tsdb.DefaultHeadOptions()
+		headOpts.ChunkDirRoot = tmpDir
+		headOpts.ChunkRange = 1000
+		h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil)
+		testutil.Ok(t, err)
+		t.Cleanup(func() { testutil.Ok(t, h.Close()) })

-	app := h.Appender(context.Background())
-	_, err = app.Append(0, labels.FromStrings("a", "1", "z", "1"), 0, 1)
-	testutil.Ok(t, err)
-	_, err = app.Append(0, labels.FromStrings("a", "1", "z", "2"), 0, 1)
-	testutil.Ok(t, err)
-	_, err = app.Append(0, labels.FromStrings("a", "1", "z", "3"), 0, 1)
-	testutil.Ok(t, err)
-	_, err = app.Append(0, labels.FromStrings("a", "1", "z", "4"), 0, 1)
-	testutil.Ok(t, err)
-	_, err = app.Append(0, labels.FromStrings("a", "1", "z", "5"), 0, 1)
-	testutil.Ok(t, err)
-	_, err = app.Append(0, labels.FromStrings("a", "1", "z", "6"), 0, 1)
-	testutil.Ok(t, err)
-	testutil.Ok(t, app.Commit())
+		app := h.Appender(context.Background())
+		_, err = app.Append(0, labels.FromStrings("a", "1", "z", "1"), 0, 1)
+		testutil.Ok(t, err)
+		_, err = app.Append(0, labels.FromStrings("a", "1", "z", "2"), 0, 1)
+		testutil.Ok(t, err)
+		_, err = app.Append(0, labels.FromStrings("a", "1", "z", "3"), 0, 1)
+		testutil.Ok(t, err)
+		_, err = app.Append(0, labels.FromStrings("a", "1", "z", "4"), 0, 1)
+		testutil.Ok(t, err)
+		_, err = app.Append(0, labels.FromStrings("a", "1", "z", "5"), 0, 1)
+		testutil.Ok(t, err)
+		_, err = app.Append(0, labels.FromStrings("a", "1", "z", "6"), 0, 1)
+		testutil.Ok(t, err)
+		testutil.Ok(t, app.Commit())

-	id := storetestutil.CreateBlockFromHead(t, auxDir, h)
+		id := storetestutil.CreateBlockFromHead(t, auxDir, h)

-	auxBlockDir := filepath.Join(auxDir, id.String())
-	_, err = metadata.InjectThanos(log.NewNopLogger(), auxBlockDir, metadata.Thanos{
-		Labels:     extLset.Map(),
-		Downsample: metadata.ThanosDownsample{Resolution: 0},
-		Source:     metadata.TestSource,
-	}, nil)
-	testutil.Ok(t, err)
-	testutil.Ok(t, block.Upload(context.Background(), logger, bkt, auxBlockDir, metadata.NoneFunc))
+		auxBlockDir := filepath.Join(auxDir, id.String())
+		_, err = metadata.InjectThanos(log.NewNopLogger(), auxBlockDir, metadata.Thanos{
+			Labels:     extLset.Map(),
+			Downsample: metadata.ThanosDownsample{Resolution: 0},
+			Source:     metadata.TestSource,
+		}, nil)
+		testutil.Ok(t, err)
+		testutil.Ok(t, block.Upload(context.Background(), logger, bkt, auxBlockDir, metadata.NoneFunc, enableBirthstone))

-	chunkPool, err := NewDefaultChunkBytesPool(2e5)
-	testutil.Ok(t, err)
+		chunkPool, err := NewDefaultChunkBytesPool(2e5)
+		testutil.Ok(t, err)

-	insBkt := objstore.WithNoopInstr(bkt)
-	baseBlockIDsFetcher := block.NewConcurrentLister(logger, insBkt)
-	metaFetcher, err := block.NewMetaFetcher(logger, 20, insBkt, baseBlockIDsFetcher, metaDir, nil, []block.MetadataFilter{
-		block.NewTimePartitionMetaFilter(allowAllFilterConf.MinTime, allowAllFilterConf.MaxTime),
-	})
-	testutil.Ok(t, err)
+		insBkt := objstore.WithNoopInstr(bkt)
+		baseBlockIDsFetcher := block.NewConcurrentLister(logger, insBkt)
+		metaFetcher, err := block.NewMetaFetcher(logger, 20, insBkt, baseBlockIDsFetcher, metaDir, nil, []block.MetadataFilter{
+			block.NewTimePartitionMetaFilter(allowAllFilterConf.MinTime, allowAllFilterConf.MaxTime),
+		})
+		testutil.Ok(t, err)

-	firstBytesLimiterChecked := false
-	secondBytesLimiterChecked := false
-
-	// Set series limit to 2. Only pass if series limiter applies
-	// for lazy postings only.
-	bucketStore, err := NewBucketStore(
-		objstore.WithNoopInstr(bkt),
-		metaFetcher,
-		"",
-		NewChunksLimiterFactory(10e6),
-		NewSeriesLimiterFactory(2),
-		func(_ prometheus.Counter) BytesLimiter {
-			return &compositeBytesLimiterMock{
-				limiters: []BytesLimiter{
-					&bytesLimiterMock{
-						limitFunc: func(_ uint64, _ StoreDataType) error {
-							firstBytesLimiterChecked = true
-							return nil
+		firstBytesLimiterChecked := false
+		secondBytesLimiterChecked := false
+
+		// Set series limit to 2. Only pass if series limiter applies
+		// for lazy postings only.
+		bucketStore, err := NewBucketStore(
+			objstore.WithNoopInstr(bkt),
+			metaFetcher,
+			"",
+			NewChunksLimiterFactory(10e6),
+			NewSeriesLimiterFactory(2),
+			func(_ prometheus.Counter) BytesLimiter {
+				return &compositeBytesLimiterMock{
+					limiters: []BytesLimiter{
+						&bytesLimiterMock{
+							limitFunc: func(_ uint64, _ StoreDataType) error {
+								firstBytesLimiterChecked = true
+								return nil
+							},
-					},
-					&bytesLimiterMock{
-						limitFunc: func(_ uint64, _ StoreDataType) error {
-							secondBytesLimiterChecked = true
-							return nil
+						},
+						&bytesLimiterMock{
+							limitFunc: func(_ uint64, _ StoreDataType) error {
+								secondBytesLimiterChecked = true
+								return nil
+							},
 						},
 					},
-				},
-			}
-		},
-		NewGapBasedPartitioner(PartitionerMaxGapSize),
-		20,
-		true,
-		DefaultPostingOffsetInMemorySampling,
-		false,
-		false,
-		1*time.Minute,
-		WithChunkPool(chunkPool),
-		WithFilterConfig(allowAllFilterConf),
-		WithLazyExpandedPostings(true),
-		WithBlockEstimatedMaxSeriesFunc(func(_ metadata.Meta) uint64 {
-			return 1
-		}),
-	)
-	testutil.Ok(t, err)
-	t.Cleanup(func() { testutil.Ok(t, bucketStore.Close()) })
+				}
+			},
+			NewGapBasedPartitioner(PartitionerMaxGapSize),
+			20,
+			true,
+			DefaultPostingOffsetInMemorySampling,
+			false,
+			false,
+			1*time.Minute,
+			WithChunkPool(chunkPool),
+			WithFilterConfig(allowAllFilterConf),
+			WithLazyExpandedPostings(true),
+			WithBlockEstimatedMaxSeriesFunc(func(_ metadata.Meta) uint64 {
+				return 1
+			}),
+		)
+		testutil.Ok(t, err)
+		t.Cleanup(func() { testutil.Ok(t, bucketStore.Close()) })

-	testutil.Ok(t, bucketStore.SyncBlocks(context.Background()))
+		testutil.Ok(t, bucketStore.SyncBlocks(context.Background()))

-	req := &storepb.SeriesRequest{
-		MinTime: timestamp.FromTime(minTime),
-		MaxTime: timestamp.FromTime(maxTime),
-		Matchers: []storepb.LabelMatcher{
-			{Type: storepb.LabelMatcher_EQ, Name: "a", Value: "1"},
-			{Type: storepb.LabelMatcher_RE, Name: "z", Value: "1|2"},
-		},
+		req := &storepb.SeriesRequest{
+			MinTime: timestamp.FromTime(minTime),
+			MaxTime: timestamp.FromTime(maxTime),
+			Matchers: []storepb.LabelMatcher{
+				{Type: storepb.LabelMatcher_EQ, Name: "a", Value: "1"},
+				{Type: storepb.LabelMatcher_RE, Name: "z", Value: "1|2"},
+			},
+		}
+		srv := newStoreSeriesServer(context.Background())
+		testutil.Ok(t, bucketStore.Series(req, srv))
+		testutil.Equals(t, 2, len(srv.SeriesSet))
+		testutil.Equals(t, true, firstBytesLimiterChecked)
+		testutil.Equals(t, true, secondBytesLimiterChecked)
 	}
-	srv := newStoreSeriesServer(context.Background())
-	testutil.Ok(t, bucketStore.Series(req, srv))
-	testutil.Equals(t, 2, len(srv.SeriesSet))
-	testutil.Equals(t, true, firstBytesLimiterChecked)
-	testutil.Equals(t, true, secondBytesLimiterChecked)
+
+	t.Run("enableBirthstone", func(t *testing.T) {
+		runTest(t, true)
+	})
+	t.Run("disableBirthstone", func(t *testing.T) {
+		runTest(t, false)
+	})
 }

 type bytesLimiterMock struct {
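compositeBytesLimiterMock above fans a single reservation out to two child limiters so the test can assert that both are consulted even with lazy expanded postings enabled. The fan-out itself is just a loop over the children; a condensed sketch against the ReserveWithType signature visible in the hunk header below, assuming the BytesLimiter and StoreDataType types from this package:

// Fan-out in the spirit of compositeBytesLimiterMock: every child must
// accept the reservation, and the first refusal aborts the request.
type fanOutLimiter struct {
	limiters []BytesLimiter
}

func (f *fanOutLimiter) ReserveWithType(num uint64, dataType StoreDataType) error {
	for _, l := range f.limiters {
		if err := l.ReserveWithType(num, dataType); err != nil {
			return err
		}
	}
	return nil
}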
@@ -3971,136 +4070,145 @@ func (m *compositeBytesLimiterMock) ReserveWithType(num uint64, dataType StoreDa
 func TestBucketStoreMetadataLimit(t *testing.T) {
 	t.Parallel()

-	tb := testutil.NewTB(t)
-
-	tmpDir := t.TempDir()
+	runTest := func(t *testing.T, enableBirthstone bool) {
+		tb := testutil.NewTB(t)

-	bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt"))
-	testutil.Ok(tb, err)
-	defer func() { testutil.Ok(tb, bkt.Close()) }()
+		tmpDir := t.TempDir()

-	uploadTestBlock(tb, tmpDir, bkt, 30000)
+		bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt"))
+		testutil.Ok(tb, err)
+		defer func() { testutil.Ok(tb, bkt.Close()) }()

-	instrBkt := objstore.WithNoopInstr(bkt)
-	logger := log.NewNopLogger()
+		uploadTestBlock(tb, tmpDir, bkt, 30000, enableBirthstone)

-	// Instance a real bucket store we'll use to query the series.
-	baseBlockIDsFetcher := block.NewConcurrentLister(logger, instrBkt)
-	fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, baseBlockIDsFetcher, tmpDir, nil, nil)
-	testutil.Ok(tb, err)
+		instrBkt := objstore.WithNoopInstr(bkt)
+		logger := log.NewNopLogger()

-	indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{})
-	testutil.Ok(tb, err)
+		// Instance a real bucket store we'll use to query the series.
+		baseBlockIDsFetcher := block.NewConcurrentLister(logger, instrBkt)
+		fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, baseBlockIDsFetcher, tmpDir, nil, nil)
+		testutil.Ok(tb, err)

-	store, err := NewBucketStore(
-		instrBkt,
-		fetcher,
-		tmpDir,
-		NewChunksLimiterFactory(0),
-		NewSeriesLimiterFactory(0),
-		NewBytesLimiterFactory(0),
-		NewGapBasedPartitioner(PartitionerMaxGapSize),
-		10,
-		false,
-		DefaultPostingOffsetInMemorySampling,
-		true,
-		false,
-		0,
-		WithLogger(logger),
-		WithIndexCache(indexCache),
-	)
-	testutil.Ok(tb, err)
-	testutil.Ok(tb, store.SyncBlocks(context.Background()))
+		indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{})
+		testutil.Ok(tb, err)

-	seriesTests := map[string]struct {
-		limit           int64
-		expectedResults int
-	}{
-		"series without limit": {
-			expectedResults: 12000,
-		},
-		"series with limit": {
-			limit:           11000,
-			expectedResults: 11000,
-		},
-	}
+		store, err := NewBucketStore(
+			instrBkt,
+			fetcher,
+			tmpDir,
+			NewChunksLimiterFactory(0),
+			NewSeriesLimiterFactory(0),
+			NewBytesLimiterFactory(0),
+			NewGapBasedPartitioner(PartitionerMaxGapSize),
+			10,
+			false,
+			DefaultPostingOffsetInMemorySampling,
+			true,
+			false,
+			0,
+			WithLogger(logger),
+			WithIndexCache(indexCache),
+		)
+		testutil.Ok(tb, err)
+		testutil.Ok(tb, store.SyncBlocks(context.Background()))
+
+		seriesTests := map[string]struct {
+			limit           int64
+			expectedResults int
+		}{
+			"series without limit": {
+				expectedResults: 12000,
+			},
+			"series with limit": {
+				limit:           11000,
+				expectedResults: 11000,
+			},
+		}

-	for testName, testData := range seriesTests {
-		t.Run(testName, func(t *testing.T) {
-			req := &storepb.SeriesRequest{
-				MinTime: timestamp.FromTime(minTime),
-				MaxTime: timestamp.FromTime(maxTime),
-				Limit:   testData.limit,
-				Matchers: []storepb.LabelMatcher{
-					{Type: storepb.LabelMatcher_EQ, Name: "j", Value: "foo"},
-				},
-			}
+		for testName, testData := range seriesTests {
+			t.Run(testName, func(t *testing.T) {
+				req := &storepb.SeriesRequest{
+					MinTime: timestamp.FromTime(minTime),
+					MaxTime: timestamp.FromTime(maxTime),
+					Limit:   testData.limit,
+					Matchers: []storepb.LabelMatcher{
+						{Type: storepb.LabelMatcher_EQ, Name: "j", Value: "foo"},
+					},
+				}

-			srv := newStoreSeriesServer(context.Background())
-			err = store.Series(req, srv)
-			testutil.Ok(t, err)
-			testutil.Assert(t, len(srv.SeriesSet) == testData.expectedResults)
-		})
-	}
+				srv := newStoreSeriesServer(context.Background())
+				err = store.Series(req, srv)
+				testutil.Ok(t, err)
+				testutil.Assert(t, len(srv.SeriesSet) == testData.expectedResults)
+			})
+		}

-	labelNamesTests := map[string]struct {
-		limit           int64
-		expectedResults []string
-	}{
-		"label names without limit": {
-			expectedResults: []string{"ext1", "i", "j", "n", "uniq"},
-		},
-		"label names with limit": {
-			limit:           3,
-			expectedResults: []string{"ext1", "i", "j"},
-		},
-	}
+		labelNamesTests := map[string]struct {
+			limit           int64
+			expectedResults []string
+		}{
+			"label names without limit": {
+				expectedResults: []string{"ext1", "i", "j", "n", "uniq"},
+			},
+			"label names with limit": {
+				limit:           3,
+				expectedResults: []string{"ext1", "i", "j"},
+			},
+		}

-	for testName, testData := range labelNamesTests {
-		t.Run(testName, func(t *testing.T) {
-			req := &storepb.LabelNamesRequest{
-				Start: timestamp.FromTime(minTime),
-				End:   timestamp.FromTime(maxTime),
-				Limit: testData.limit,
-				Matchers: []storepb.LabelMatcher{
-					{Type: storepb.LabelMatcher_EQ, Name: "j", Value: "foo"},
-				},
-			}
+		for testName, testData := range labelNamesTests {
+			t.Run(testName, func(t *testing.T) {
+				req := &storepb.LabelNamesRequest{
+					Start: timestamp.FromTime(minTime),
+					End:   timestamp.FromTime(maxTime),
+					Limit: testData.limit,
+					Matchers: []storepb.LabelMatcher{
+						{Type: storepb.LabelMatcher_EQ, Name: "j", Value: "foo"},
+					},
+				}

-			resp, err := store.LabelNames(context.Background(), req)
-			testutil.Ok(t, err)
-			testutil.Equals(t, testData.expectedResults, resp.Names)
-		})
-	}
+				resp, err := store.LabelNames(context.Background(), req)
+				testutil.Ok(t, err)
+				testutil.Equals(t, testData.expectedResults, resp.Names)
+			})
+		}

-	labelValuesTests := map[string]struct {
-		limit           int64
-		expectedResults []string
-	}{
-		"label values without limit": {
-			expectedResults: []string{"bar", "foo"},
-		},
-		"label values with limit": {
-			limit:           1,
-			expectedResults: []string{"bar"},
-		},
-	}
+		labelValuesTests := map[string]struct {
+			limit           int64
+			expectedResults []string
+		}{
+			"label values without limit": {
+				expectedResults: []string{"bar", "foo"},
+			},
+			"label values with limit": {
+				limit:           1,
+				expectedResults: []string{"bar"},
+			},
+		}

-	for testName, testData := range labelValuesTests {
-		t.Run(testName, func(t *testing.T) {
-			req := &storepb.LabelValuesRequest{
-				Start: timestamp.FromTime(minTime),
-				End:   timestamp.FromTime(maxTime),
-				Label: "j",
-				Limit: testData.limit,
-				Matchers: []storepb.LabelMatcher{
-					{Type: storepb.LabelMatcher_RE, Name: "j", Value: "(foo|bar)"},
-				},
-			}
+		for testName, testData := range labelValuesTests {
+			t.Run(testName, func(t *testing.T) {
+				req := &storepb.LabelValuesRequest{
+					Start: timestamp.FromTime(minTime),
+					End:   timestamp.FromTime(maxTime),
+					Label: "j",
+					Limit: testData.limit,
+					Matchers: []storepb.LabelMatcher{
+						{Type: storepb.LabelMatcher_RE, Name: "j", Value: "(foo|bar)"},
+					},
+				}

-			resp, err := store.LabelValues(context.Background(), req)
-			testutil.Ok(t, err)
-			testutil.Equals(t, testData.expectedResults, resp.Values)
-		})
+				resp, err := store.LabelValues(context.Background(), req)
+				testutil.Ok(t, err)
+				testutil.Equals(t, testData.expectedResults, resp.Values)
+			})
+		}
 	}
+
+	t.Run("enableBirthstone", func(t *testing.T) {
+		runTest(t, true)
+	})
+	t.Run("disableBirthstone", func(t *testing.T) {
+		runTest(t, false)
+	})
 }
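The three tables above all assert the same limit contract: results are computed, sorted, then truncated to the request's Limit, with zero meaning unlimited (five label names become "ext1", "i", "j" under a limit of 3). As a standalone illustration of that contract (not code from the patch):

// Hypothetical illustration of the sorted-then-truncated limit contract
// the tests above assert.
func applyLimit(sorted []string, limit int64) []string {
	if limit <= 0 || int64(len(sorted)) <= limit {
		return sorted // zero or negative means unlimited in these tests
	}
	return sorted[:limit]
}

// applyLimit([]string{"ext1", "i", "j", "n", "uniq"}, 3) == []string{"ext1", "i", "j"}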
%s failed", resid) } diff --git a/pkg/verifier/safe_delete.go b/pkg/verifier/safe_delete.go index 65601336d22..9c68acea9cf 100644 --- a/pkg/verifier/safe_delete.go +++ b/pkg/verifier/safe_delete.go @@ -67,7 +67,7 @@ func BackupAndDelete(ctx Context, id ulid.ULID) error { } // Backup the block. - if err := backupDownloaded(ctx, ctx.Logger, dir, ctx.BackupBkt, id); err != nil { + if err := backupDownloaded(ctx, ctx.Logger, dir, ctx.BackupBkt, id, ctx.EnableBirthstone); err != nil { return err } @@ -103,7 +103,7 @@ func BackupAndDeleteDownloaded(ctx Context, bdir string, id ulid.ULID) error { } // Backup the block. - if err := backupDownloaded(ctx, ctx.Logger, bdir, ctx.BackupBkt, id); err != nil { + if err := backupDownloaded(ctx, ctx.Logger, bdir, ctx.BackupBkt, id, ctx.EnableBirthstone); err != nil { return err } @@ -126,7 +126,7 @@ func BackupAndDeleteDownloaded(ctx Context, bdir string, id ulid.ULID) error { // backupDownloaded is a helper function that uploads a TSDB block // found on disk to the given bucket. An error is returned if any operation // fails. -func backupDownloaded(ctx context.Context, logger log.Logger, bdir string, backupBkt objstore.Bucket, id ulid.ULID) error { +func backupDownloaded(ctx context.Context, logger log.Logger, bdir string, backupBkt objstore.Bucket, id ulid.ULID, enableBirthstone bool) error { // Safety checks. if _, err := os.Stat(filepath.Join(bdir, "meta.json")); err != nil { // If there is any error stat'ing meta.json inside the TSDB block @@ -137,7 +137,7 @@ func backupDownloaded(ctx context.Context, logger log.Logger, bdir string, backu // Upload the on disk TSDB block. level.Info(logger).Log("msg", "Uploading block to backup bucket", "id", id.String()) - if err := block.Upload(ctx, logger, backupBkt, bdir, metadata.NoneFunc); err != nil { + if err := block.Upload(ctx, logger, backupBkt, bdir, metadata.NoneFunc, enableBirthstone); err != nil { return errors.Wrap(err, "upload to backup") } diff --git a/pkg/verifier/verify.go b/pkg/verifier/verify.go index 3c8cc2ed1e3..c4b0b7a83c5 100644 --- a/pkg/verifier/verify.go +++ b/pkg/verifier/verify.go @@ -34,11 +34,12 @@ type VerifierRepairer interface { type Context struct { context.Context - Logger log.Logger - Bkt objstore.Bucket - BackupBkt objstore.Bucket - Fetcher block.MetadataFetcher - DeleteDelay time.Duration + Logger log.Logger + Bkt objstore.Bucket + BackupBkt objstore.Bucket + Fetcher block.MetadataFetcher + DeleteDelay time.Duration + EnableBirthstone bool metrics *metrics } @@ -112,14 +113,15 @@ idLoop: } // New returns verifier's manager. -func NewManager(reg prometheus.Registerer, logger log.Logger, bkt, backupBkt objstore.Bucket, fetcher block.MetadataFetcher, deleteDelay time.Duration, vs Registry) *Manager { +func NewManager(reg prometheus.Registerer, logger log.Logger, bkt, backupBkt objstore.Bucket, fetcher block.MetadataFetcher, deleteDelay time.Duration, enableBirthstone bool, vs Registry) *Manager { return &Manager{ Context: Context{ - Logger: logger, - Bkt: bkt, - BackupBkt: backupBkt, - Fetcher: fetcher, - DeleteDelay: deleteDelay, + Logger: logger, + Bkt: bkt, + BackupBkt: backupBkt, + Fetcher: fetcher, + DeleteDelay: deleteDelay, + EnableBirthstone: enableBirthstone, metrics: newVerifierMetrics(reg), },