diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 0542d20638..507b8a8735 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -11,8 +11,8 @@ jobs:
       - name: Upgrade golang
         run: |
           cd /tmp
-          wget https://dl.google.com/go/go1.17.7.linux-amd64.tar.gz
-          tar -zxvf go1.17.7.linux-amd64.tar.gz
+          wget https://dl.google.com/go/go1.18.7.linux-amd64.tar.gz
+          tar -zxvf go1.18.7.linux-amd64.tar.gz
           sudo rm -fr /usr/local/go
           sudo mv /tmp/go /usr/local/go
           cd -
diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go
index fdb70c566b..c032dfd1a8 100644
--- a/model/textparse/openmetricsparse_test.go
+++ b/model/textparse/openmetricsparse_test.go
@@ -597,7 +597,7 @@ func TestOMNullByteHandling(t *testing.T) {
 		},
 		{
 			input: "a{b\x00=\"hiih\"} 1",
-			err:   "expected equal, got \"INVALID\"",
+			err:   "expected equal, got \"INVALID\"",
 		},
 		{
 			input: "a\x00{b=\"ddd\"} 1",
diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go
index 6a1216da16..a30176a58e 100644
--- a/model/textparse/promparse_test.go
+++ b/model/textparse/promparse_test.go
@@ -322,7 +322,7 @@ func TestPromNullByteHandling(t *testing.T) {
 		},
 		{
 			input: "a{b\x00=\"hiih\"} 1",
-			err:   "expected equal, got \"INVALID\"",
+			err:   "expected equal, got \"INVALID\"",
 		},
 		{
 			input: "a\x00{b=\"ddd\"} 1",
diff --git a/rules/alerting_test.go b/rules/alerting_test.go
index 767c2d7e07..947a85dc7a 100644
--- a/rules/alerting_test.go
+++ b/rules/alerting_test.go
@@ -470,7 +470,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
 		close(getDoneCh)
 	}()
 	_, err = ruleWithQueryInTemplate.Eval(
-		suite.Context(), evalTime, slowQueryFunc, nil, 0,
+		suite.Context(), 0, evalTime, slowQueryFunc, nil, 0,
 	)
 	require.NoError(t, err)
 }
diff --git a/tsdb/db.go b/tsdb/db.go
index 3595727936..0a46b4e600 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -1400,10 +1400,6 @@ func (db *DB) reloadBlocks() (err error) {
 	return nil
 }
 
-func (db *DB) AllowOverlappingQueries() bool {
-	return db.opts.AllowOverlappingQueries || db.oooWasEnabled.Load()
-}
-
 func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool, cache *hashcache.SeriesHashCache) (blocks []*Block, corrupted map[ulid.ULID]error, err error) {
 	bDirs, err := blockDirs(dir)
 	if err != nil {
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 5496e6f2b5..02a753506a 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -3970,7 +3970,6 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
 
 // TODO(codesome): test more samples incoming once compaction has started. To verify new samples after the start
 //
 // are not included in this compaction.
->>>>>>> upstream/main
 func TestOOOCompaction(t *testing.T) {
 	dir := t.TempDir()
diff --git a/tsdb/head.go b/tsdb/head.go
index 9f340b42ad..420aa542bd 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -960,27 +960,6 @@ func (h *Head) updateMinOOOMaxOOOTime(mint, maxt int64) {
 	}
 }
 
-func (h *Head) updateMinOOOMaxOOOTime(mint, maxt int64) {
-	for {
-		lt := h.MinOOOTime()
-		if mint >= lt {
-			break
-		}
-		if h.minOOOTime.CAS(lt, mint) {
-			break
-		}
-	}
-	for {
-		ht := h.MaxOOOTime()
-		if maxt <= ht {
-			break
-		}
-		if h.maxOOOTime.CAS(ht, maxt) {
-			break
-		}
-	}
-}
-
 // SetMinValidTime sets the minimum timestamp the head can ingest.
 func (h *Head) SetMinValidTime(minValidTime int64) {
 	h.minValidTime.Store(minValidTime)
diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index c1e7ac6296..9a3d746aa5 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -877,7 +877,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, chunkDiskMapper chunkDiskMapper,
 	return s.headChunk
 }
 
-func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) {
+func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper chunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) {
 	ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)
 
 	s.oooHeadChunk = &oooHeadChunk{
diff --git a/tsdb/head_read.go b/tsdb/head_read.go
index 7bc159cbae..82b4829ee0 100644
--- a/tsdb/head_read.go
+++ b/tsdb/head_read.go
@@ -644,7 +644,7 @@ func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator {
 
 // iterator returns a chunk iterator for the requested chunkID, or a NopIterator if the requested ID is out of range.
 // It is unsafe to call this concurrently with s.append(...) without holding the series lock.
-func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool, it chunkenc.Iterator) chunkenc.Iterator {
+func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, chunkDiskMapper chunkDiskMapper, memChunkPool *sync.Pool, it chunkenc.Iterator) chunkenc.Iterator {
 	c, garbageCollect, err := s.chunk(id, chunkDiskMapper, memChunkPool)
 	// TODO(fabxc): Work around! An error will be returns when a querier have retrieved a pointer to a
 	// series's chunk, which got then garbage collected before it got