Use FastFingerprint where appropriate.

This commit is contained in:
beorn7 2015-05-05 18:17:51 +02:00
parent f4d90a105e
commit 9820e5fe99
4 changed files with 20 additions and 20 deletions

View file

@@ -50,9 +50,9 @@ func newTestPersistence(t *testing.T, encoding chunkEncoding) (*persistence, tes
func buildTestChunks(encoding chunkEncoding) map[clientmodel.Fingerprint][]chunk {
fps := clientmodel.Fingerprints{
m1.Fingerprint(),
m2.Fingerprint(),
m3.Fingerprint(),
m1.FastFingerprint(),
m2.FastFingerprint(),
m3.FastFingerprint(),
}
fpToChunks := map[clientmodel.Fingerprint][]chunk{}
@@ -375,11 +375,11 @@ func testCheckpointAndLoadSeriesMapAndHeads(t *testing.T, encoding chunkEncoding
s5.persistWatermark = 3
chunkCountS4 := len(s4.chunkDescs)
chunkCountS5 := len(s5.chunkDescs)
sm.put(m1.Fingerprint(), s1)
sm.put(m2.Fingerprint(), s2)
sm.put(m3.Fingerprint(), s3)
sm.put(m4.Fingerprint(), s4)
sm.put(m5.Fingerprint(), s5)
sm.put(m1.FastFingerprint(), s1)
sm.put(m2.FastFingerprint(), s2)
sm.put(m3.FastFingerprint(), s3)
sm.put(m4.FastFingerprint(), s4)
sm.put(m5.FastFingerprint(), s5)
if err := p.checkpointSeriesMapAndHeads(sm, fpLocker); err != nil {
t.Fatal(err)
@@ -392,7 +392,7 @@ func testCheckpointAndLoadSeriesMapAndHeads(t *testing.T, encoding chunkEncoding
if loadedSM.length() != 4 {
t.Errorf("want 4 series in map, got %d", loadedSM.length())
}
if loadedS1, ok := loadedSM.get(m1.Fingerprint()); ok {
if loadedS1, ok := loadedSM.get(m1.FastFingerprint()); ok {
if !reflect.DeepEqual(loadedS1.metric, m1) {
t.Errorf("want metric %v, got %v", m1, loadedS1.metric)
}
@@ -408,7 +408,7 @@ func testCheckpointAndLoadSeriesMapAndHeads(t *testing.T, encoding chunkEncoding
} else {
t.Errorf("couldn't find %v in loaded map", m1)
}
if loadedS3, ok := loadedSM.get(m3.Fingerprint()); ok {
if loadedS3, ok := loadedSM.get(m3.FastFingerprint()); ok {
if !reflect.DeepEqual(loadedS3.metric, m3) {
t.Errorf("want metric %v, got %v", m3, loadedS3.metric)
}
@@ -424,7 +424,7 @@ func testCheckpointAndLoadSeriesMapAndHeads(t *testing.T, encoding chunkEncoding
} else {
t.Errorf("couldn't find %v in loaded map", m3)
}
if loadedS4, ok := loadedSM.get(m4.Fingerprint()); ok {
if loadedS4, ok := loadedSM.get(m4.FastFingerprint()); ok {
if !reflect.DeepEqual(loadedS4.metric, m4) {
t.Errorf("want metric %v, got %v", m4, loadedS4.metric)
}
@@ -449,7 +449,7 @@ func testCheckpointAndLoadSeriesMapAndHeads(t *testing.T, encoding chunkEncoding
} else {
t.Errorf("couldn't find %v in loaded map", m4)
}
if loadedS5, ok := loadedSM.get(m5.Fingerprint()); ok {
if loadedS5, ok := loadedSM.get(m5.FastFingerprint()); ok {
if !reflect.DeepEqual(loadedS5.metric, m5) {
t.Errorf("want metric %v, got %v", m5, loadedS5.metric)
}

View file

@@ -318,7 +318,7 @@ func (s *memorySeries) preloadChunks(indexes []int, mss *memorySeriesStorage) ([
if s.chunkDescsOffset == -1 {
panic("requested loading chunks from persistence in a situation where we must not have persisted data for chunk descriptors in memory")
}
fp := s.metric.Fingerprint()
fp := s.metric.FastFingerprint() // TODO(beorn): Handle collisions.
chunks, err := mss.loadChunks(fp, loadIndexes, s.chunkDescsOffset)
if err != nil {
// Unpin the chunks since we won't return them as pinned chunks now.

View file

@@ -382,7 +382,7 @@ func (s *memorySeriesStorage) Append(sample *clientmodel.Sample) {
}
glog.Warning("Sample ingestion resumed.")
}
fp := sample.Metric.Fingerprint()
fp := sample.Metric.FastFingerprint() // TODO(beorn): Handle collisions.
s.fpLocker.Lock(fp)
series := s.getOrCreateSeries(fp, sample.Metric)
completedChunksCount := series.add(&metric.SamplePair{

View file

@@ -46,7 +46,7 @@ func TestGetFingerprintsForLabelMatchers(t *testing.T) {
Timestamp: clientmodel.Timestamp(i),
Value: clientmodel.SampleValue(i),
}
fingerprints[i] = metric.Fingerprint()
fingerprints[i] = metric.FastFingerprint()
}
for _, s := range samples {
storage.Append(s)
@@ -172,7 +172,7 @@ func TestLoop(t *testing.T) {
storage.Append(s)
}
storage.WaitForIndexing()
series, _ := storage.(*memorySeriesStorage).fpToSeries.get(clientmodel.Metric{}.Fingerprint())
series, _ := storage.(*memorySeriesStorage).fpToSeries.get(clientmodel.Metric{}.FastFingerprint())
cdsBefore := len(series.chunkDescs)
time.Sleep(fpMaxWaitDuration + time.Second) // TODO(beorn7): Ugh, need to wait for maintenance to kick in.
cdsAfter := len(series.chunkDescs)
@@ -251,7 +251,7 @@ func testGetValueAtTime(t *testing.T, encoding chunkEncoding) {
}
s.WaitForIndexing()
fp := clientmodel.Metric{}.Fingerprint()
fp := clientmodel.Metric{}.FastFingerprint()
it := s.NewIterator(fp)
@@ -344,7 +344,7 @@ func testGetRangeValues(t *testing.T, encoding chunkEncoding) {
}
s.WaitForIndexing()
fp := clientmodel.Metric{}.Fingerprint()
fp := clientmodel.Metric{}.FastFingerprint()
it := s.NewIterator(fp)
@@ -498,7 +498,7 @@ func testEvictAndPurgeSeries(t *testing.T, encoding chunkEncoding) {
}
s.WaitForIndexing()
fp := clientmodel.Metric{}.Fingerprint()
fp := clientmodel.Metric{}.FastFingerprint()
// Drop ~half of the chunks.
ms.maintainMemorySeries(fp, 1000)
@@ -896,7 +896,7 @@ func verifyStorage(t testing.TB, s Storage, samples clientmodel.Samples, maxAge
// retention period, we can verify here that no results
// are returned.
}
fp := sample.Metric.Fingerprint()
fp := sample.Metric.FastFingerprint()
p := s.NewPreloader()
p.PreloadRange(fp, sample.Timestamp, sample.Timestamp, time.Hour)
found := s.NewIterator(fp).GetValueAtTime(sample.Timestamp)