// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package local

import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync/atomic"

	"github.com/prometheus/common/log"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/storage/local/chunk"
	"github.com/prometheus/prometheus/storage/local/codable"
	"github.com/prometheus/prometheus/storage/local/index"
)

// recoverFromCrash is called by loadSeriesMapAndHeads if the persistence
// appears to be dirty after the loading (either because the loading resulted in
// an error or because the persistence was dirty from the start). Not goroutine
// safe. Only call before anything else is running (except the index processing
// queue as started by newPersistence).
func (p *persistence) recoverFromCrash(fingerprintToSeries map[model.Fingerprint]*memorySeries) error {
	// TODO(beorn): We need proper tests for the crash recovery.
	log.Warn("Starting crash recovery. Prometheus is inoperational until complete.")
	log.Warn("To avoid crash recovery in the future, shut down Prometheus with SIGTERM or a HTTP POST to /-/quit.")

	fpsSeen := map[model.Fingerprint]struct{}{}
	count := 0
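	// Series files are spread over 1<<(seriesDirNameLen*4) sub-directories
	// whose names are the first seriesDirNameLen hex digits of the
	// fingerprint; the remaining digits form the file name. The format
	// string built below renders those directory names, e.g. "%02x"
	// (yielding "00" through "ff") if seriesDirNameLen is 2.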
	seriesDirNameFmt := fmt.Sprintf("%%0%dx", seriesDirNameLen)

	// Delete the fingerprint mapping file as it might be stale or
	// corrupt. We'll rebuild the mappings as we go.
	if err := os.RemoveAll(p.mappingsFileName()); err != nil {
		return fmt.Errorf("couldn't remove old fingerprint mapping file %s: %s", p.mappingsFileName(), err)
	}
	// The mappings to rebuild.
	fpm := fpMappings{}

	log.Info("Scanning files.")
	for i := 0; i < 1<<(seriesDirNameLen*4); i++ {
		dirname := filepath.Join(p.basePath, fmt.Sprintf(seriesDirNameFmt, i))
		dir, err := os.Open(dirname)
		if os.IsNotExist(err) {
			continue
		}
		if err != nil {
			return err
		}
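		// Read the directory in batches of 1024 entries so that memory
		// usage stays bounded even for very large series directories.
		// Readdir returns io.EOF once all entries have been consumed,
		// which terminates the loop.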
		for fis := []os.FileInfo{}; err != io.EOF; fis, err = dir.Readdir(1024) {
			if err != nil {
				dir.Close()
				return err
			}
			for _, fi := range fis {
				fp, ok := p.sanitizeSeries(dirname, fi, fingerprintToSeries, fpm)
				if ok {
					fpsSeen[fp] = struct{}{}
				}
				count++
				if count%10000 == 0 {
					log.Infof("%d files scanned.", count)
				}
			}
		}
		dir.Close()
	}
	log.Infof("File scan complete. %d series found.", len(fpsSeen))

	log.Info("Checking for series without series file.")
	for fp, s := range fingerprintToSeries {
		if _, seen := fpsSeen[fp]; !seen {
			// fp exists in fingerprintToSeries, but has no representation on disk.
			if s.persistWatermark == len(s.chunkDescs) {
				// Oops, everything including the head chunk was
				// already persisted, but nothing on disk.
				// Thus, we lost that series completely. Clean
				// up the remnants.
				delete(fingerprintToSeries, fp)
				if err := p.purgeArchivedMetric(fp); err != nil {
					// Purging the archived metric didn't work, so try
					// to unindex it, just in case it's in the indexes.
					p.unindexMetric(fp, s.metric)
				}
				log.Warnf("Lost series detected: fingerprint %v, metric %v.", fp, s.metric)
				continue
			}
			// If we are here, the only chunks we have are the chunks in the checkpoint.
			// Adjust things accordingly.
			if s.persistWatermark > 0 || s.chunkDescsOffset != 0 {
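				// s.persistWatermark chunks from the checkpoint were already
				// written to the (now missing) series file, and
				// s.chunkDescsOffset more chunks existed only in that file,
				// so their sum is a lower bound for the number of lost
				// chunks.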
				minLostChunks := s.persistWatermark + s.chunkDescsOffset
				if minLostChunks <= 0 {
					log.Warnf(
						"Possible loss of chunks for fingerprint %v, metric %v.",
						fp, s.metric,
					)
				} else {
					log.Warnf(
						"Lost at least %d chunks for fingerprint %v, metric %v.",
						minLostChunks, fp, s.metric,
					)
				}
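				// Drop the chunkDescs that were (supposedly) already
				// persisted and keep only the ones backed by checkpoint
				// data; the series effectively starts over with nothing
				// on disk.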
				s.chunkDescs = append(
					make([]*chunk.Desc, 0, len(s.chunkDescs)-s.persistWatermark),
					s.chunkDescs[s.persistWatermark:]...,
				)
				chunk.NumMemDescs.Sub(float64(s.persistWatermark))
				s.persistWatermark = 0
				s.chunkDescsOffset = 0
			}
			maybeAddMapping(fp, s.metric, fpm)
			fpsSeen[fp] = struct{}{} // Add so that fpsSeen is complete.
		}
	}
	log.Info("Check for series without series file complete.")

	if err := p.cleanUpArchiveIndexes(fingerprintToSeries, fpsSeen, fpm); err != nil {
		return err
	}
	if err := p.rebuildLabelIndexes(fingerprintToSeries); err != nil {
		return err
	}
	// Finally rewrite the mappings file if there are any mappings.
	if len(fpm) > 0 {
		if err := p.checkpointFPMappings(fpm); err != nil {
			return err
		}
	}

	p.dirtyMtx.Lock()
	// Only declare storage clean if it didn't become dirty during crash recovery.
	if !p.becameDirty {
		p.dirty = false
	}
	p.dirtyMtx.Unlock()

	log.Warn("Crash recovery complete.")
	return nil
}

// sanitizeSeries sanitizes a series based on its series file as defined by the
// provided directory and FileInfo. The method returns the fingerprint as
// derived from the directory and file name, and whether the provided file has
// been sanitized. A file that failed to be sanitized is moved into the
// "orphaned" sub-directory, if possible.
//
// The following steps are performed:
//
// - A file whose name doesn't comply with the naming scheme of a series file is
// simply moved into the orphaned directory.
//
// - If the size of the series file isn't a multiple of the chunk size,
// extraneous bytes are truncated. If the truncation fails, the file is
// moved into the orphaned directory.
//
// - A file that is empty (after truncation) is deleted.
//
// - A series that is not archived (i.e. it is in the fingerprintToSeries map)
// is checked for consistency of its various parameters (like persist
// watermark, offset of chunkDescs etc.). In particular, overlap between an
// in-memory head chunk and the most recent persisted chunk is
// checked. Inconsistencies are rectified.
//
// - A series that is archived (i.e. it is not in the fingerprintToSeries map)
// is checked for its presence in the index of archived series. If it cannot
// be found there, it is moved into the orphaned directory.
func (p *persistence) sanitizeSeries(
	dirname string, fi os.FileInfo,
	fingerprintToSeries map[model.Fingerprint]*memorySeries,
	fpm fpMappings,
) (model.Fingerprint, bool) {
	var (
		fp       model.Fingerprint
		err      error
		filename = filepath.Join(dirname, fi.Name())
		s        *memorySeries
	)

	purge := func() {
		if fp != 0 {
			var metric model.Metric
			if s != nil {
				metric = s.metric
			}
			if err = p.quarantineSeriesFile(
				fp, errors.New("purge during crash recovery"), metric,
			); err == nil {
				return
			}
			log.
				With("file", filename).
				With("error", err).
				Error("Failed to move lost series file to orphaned directory.")
		}
		// If we are here, we are either purging an incorrectly named
		// file, or quarantining has failed. So simply delete the file.
		if err = os.Remove(filename); err != nil {
			log.
				With("file", filename).
				With("error", err).
				Error("Failed to delete lost series file.")
		}
	}
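
	// A series file name consists of the fingerprint's hex digits that are
	// not already encoded in the directory name (fpLen-seriesDirNameLen of
	// them) followed by seriesFileSuffix. Anything that doesn't match this
	// scheme cannot be a series file.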
	if len(fi.Name()) != fpLen-seriesDirNameLen+len(seriesFileSuffix) ||
		!strings.HasSuffix(fi.Name(), seriesFileSuffix) {
		log.Warnf("Unexpected series file name %s.", filename)
		purge()
		return fp, false
	}
	if fp, err = model.FingerprintFromString(filepath.Base(dirname) + fi.Name()[:fpLen-seriesDirNameLen]); err != nil {
		log.Warnf("Error parsing file name %s: %s", filename, err)
		purge()
		return fp, false
	}
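
	// A well-formed series file contains a whole number of chunks, each
	// chunkLenWithHeader bytes long. Any remainder stems from an
	// interrupted write and is trimmed off below.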
	bytesToTrim := fi.Size() % int64(chunkLenWithHeader)
	chunksInFile := int(fi.Size()) / chunkLenWithHeader
	modTime := fi.ModTime()
	if bytesToTrim != 0 {
		log.Warnf(
			"Truncating file %s to exactly %d chunks, trimming %d extraneous bytes.",
			filename, chunksInFile, bytesToTrim,
		)
		f, err := os.OpenFile(filename, os.O_WRONLY, 0640)
		if err != nil {
			log.Errorf("Could not open file %s: %s", filename, err)
			purge()
			return fp, false
		}
		if err := f.Truncate(fi.Size() - bytesToTrim); err != nil {
			log.Errorf("Failed to truncate file %s: %s", filename, err)
			purge()
			return fp, false
		}
	}
	if chunksInFile == 0 {
		log.Warnf("No chunks left in file %s.", filename)
		purge()
		return fp, false
	}

	s, ok := fingerprintToSeries[fp]
	if ok { // This series is supposed to not be archived.
		if s == nil {
			panic("fingerprint mapped to nil pointer")
		}
		maybeAddMapping(fp, s.metric, fpm)
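		// Quick check: if nothing had to be trimmed, the number of chunks
		// in the file matches what the checkpoint expects (chunkDescsOffset
		// plus persistWatermark), and the modification time is unchanged,
		// the series file and the checkpoint agree.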
		if !p.pedanticChecks &&
			bytesToTrim == 0 &&
			s.chunkDescsOffset != -1 &&
			chunksInFile == s.chunkDescsOffset+s.persistWatermark &&
			modTime.Equal(s.modTime) {
			// Everything is consistent. We are good.
			return fp, true
		}
		// If we are here, we cannot be sure the series file is
		// consistent with the checkpoint, so we have to take a closer
		// look.
		if s.headChunkClosed {
			// This is the easy case as we have all chunks on
			// disk. Treat this series as a freshly unarchived one
			// by loading the chunkDescs and setting all parameters
			// based on the loaded chunkDescs.
			cds, err := p.loadChunkDescs(fp, 0)
			if err != nil {
				log.Errorf(
					"Failed to load chunk descriptors for metric %v, fingerprint %v: %s",
					s.metric, fp, err,
				)
				purge()
				return fp, false
			}
			log.Warnf(
				"Treating recovered metric %v, fingerprint %v, as freshly unarchived, with %d chunks in series file.",
				s.metric, fp, len(cds),
			)
			s.chunkDescs = cds
			s.chunkDescsOffset = 0
			s.savedFirstTime = cds[0].FirstTime()
			s.lastTime, err = cds[len(cds)-1].LastTime()
			if err != nil {
				log.Errorf(
					"Failed to determine time of the last sample for metric %v, fingerprint %v: %s",
					s.metric, fp, err,
				)
				purge()
				return fp, false
			}
			s.persistWatermark = len(cds)
			s.modTime = modTime
			return fp, true
		}
		// This is the tricky one: We have chunks from heads.db, but
		// some of those chunks might already be in the series
		// file. Strategy: Take the last time of the most recent chunk
		// in the series file. Then find the oldest chunk among those
		// from heads.db that has a first time later than or equal to
		// the last time from the series file. Throw away the older
		// chunks from heads.db and stitch the parts together.

		// First, throw away the chunkDescs without chunks.
		s.chunkDescs = s.chunkDescs[s.persistWatermark:]
		chunk.NumMemDescs.Sub(float64(s.persistWatermark))
		cds, err := p.loadChunkDescs(fp, 0)
		if err != nil {
			log.Errorf(
				"Failed to load chunk descriptors for metric %v, fingerprint %v: %s",
				s.metric, fp, err,
			)
			purge()
			return fp, false
		}
		s.persistWatermark = len(cds)
		s.chunkDescsOffset = 0
		s.savedFirstTime = cds[0].FirstTime()
		s.modTime = modTime

		lastTime, err := cds[len(cds)-1].LastTime()
		if err != nil {
			log.Errorf(
				"Failed to determine time of the last sample for metric %v, fingerprint %v: %s",
				s.metric, fp, err,
			)
			purge()
			return fp, false
		}
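		// keepIdx will be the index of the first checkpoint chunk whose
		// first time is at or after the last time covered by the series
		// file; everything before it overlaps with chunks already on disk
		// and is discarded.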
		keepIdx := -1
		for i, cd := range s.chunkDescs {
			if cd.FirstTime() >= lastTime {
				keepIdx = i
				break
			}
		}
		if keepIdx == -1 {
			log.Warnf(
				"Recovered metric %v, fingerprint %v: all %d chunks recovered from series file.",
				s.metric, fp, chunksInFile,
			)
			chunk.NumMemDescs.Sub(float64(len(s.chunkDescs)))
			atomic.AddInt64(&chunk.NumMemChunks, int64(-len(s.chunkDescs)))
			s.chunkDescs = cds
			s.headChunkClosed = true
			return fp, true
		}
		log.Warnf(
			"Recovered metric %v, fingerprint %v: recovered %d chunks from series file, recovered %d chunks from checkpoint.",
			s.metric, fp, chunksInFile, len(s.chunkDescs)-keepIdx,
		)
		chunk.NumMemDescs.Sub(float64(keepIdx))
		atomic.AddInt64(&chunk.NumMemChunks, int64(-keepIdx))
		if keepIdx == len(s.chunkDescs) {
			// No chunks from series file left, head chunk is evicted, so declare it closed.
			s.headChunkClosed = true
		}
		s.chunkDescs = append(cds, s.chunkDescs[keepIdx:]...)
		return fp, true
	}
	// This series is supposed to be archived.
	metric, err := p.archivedMetric(fp)
	if err != nil {
		log.Errorf(
			"Fingerprint %v assumed archived but couldn't be looked up in archived index: %s",
			fp, err,
		)
		purge()
		return fp, false
	}
	if metric == nil {
		log.Warnf(
			"Fingerprint %v assumed archived but couldn't be found in archived index.",
			fp,
		)
		purge()
		return fp, false
	}
	// This series looks like a properly archived one.
	maybeAddMapping(fp, metric, fpm)
	return fp, true
}
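
// cleanUpArchiveIndexes checks the archive indexes against the series files
// found on disk and the series in memory: index entries for unknown or
// non-archived fingerprints are purged, and archived series without a
// time-range entry are unarchived into fpToSeries so that they can be
// recovered.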
func (p *persistence) cleanUpArchiveIndexes(
	fpToSeries map[model.Fingerprint]*memorySeries,
	fpsSeen map[model.Fingerprint]struct{},
	fpm fpMappings,
) error {
	log.Info("Cleaning up archive indexes.")
	var fp codable.Fingerprint
	var m codable.Metric
	count := 0
	if err := p.archivedFingerprintToMetrics.ForEach(func(kv index.KeyValueAccessor) error {
		count++
		if count%10000 == 0 {
			log.Infof("%d archived metrics checked.", count)
		}
		if err := kv.Key(&fp); err != nil {
			return err
		}
		_, fpSeen := fpsSeen[model.Fingerprint(fp)]
		inMemory := false
		if fpSeen {
			_, inMemory = fpToSeries[model.Fingerprint(fp)]
		}
		if !fpSeen || inMemory {
			if inMemory {
				log.Warnf("Archive clean-up: Fingerprint %v is not archived. Purging from archive indexes.", model.Fingerprint(fp))
			}
			if !fpSeen {
				log.Warnf("Archive clean-up: Fingerprint %v is unknown. Purging from archive indexes.", model.Fingerprint(fp))
			}
			// It's fine if the fp is not in the archive indexes.
			if _, err := p.archivedFingerprintToMetrics.Delete(fp); err != nil {
				return err
			}
			// Delete from timerange index, too.
			_, err := p.archivedFingerprintToTimeRange.Delete(fp)
			return err
		}
		// fp is legitimately archived. Now we need the metric to check for a mapped fingerprint.
		if err := kv.Value(&m); err != nil {
			return err
		}
		maybeAddMapping(model.Fingerprint(fp), model.Metric(m), fpm)
		// Make sure it is in timerange index, too.
		has, err := p.archivedFingerprintToTimeRange.Has(fp)
		if err != nil {
			return err
		}
		if has {
			return nil // All good.
		}
		log.Warnf("Archive clean-up: Fingerprint %v is not in time-range index. Unarchiving it for recovery.", model.Fingerprint(fp))
		// Again, it's fine if fp is not in the archive index.
		if _, err := p.archivedFingerprintToMetrics.Delete(fp); err != nil {
			return err
		}
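		// Unarchive the series: load its chunk descriptors from the series
		// file and re-create an in-memory series so it is registered in
		// fpToSeries again.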
		cds, err := p.loadChunkDescs(model.Fingerprint(fp), 0)
		if err != nil {
			return err
		}
		series, err := newMemorySeries(model.Metric(m), cds, p.seriesFileModTime(model.Fingerprint(fp)))
		if err != nil {
			return err
		}
		fpToSeries[model.Fingerprint(fp)] = series
		return nil
	}); err != nil {
		return err
	}
	count = 0
	if err := p.archivedFingerprintToTimeRange.ForEach(func(kv index.KeyValueAccessor) error {
		count++
		if count%10000 == 0 {
			log.Infof("%d archived time ranges checked.", count)
		}
		if err := kv.Key(&fp); err != nil {
			return err
		}
		has, err := p.archivedFingerprintToMetrics.Has(fp)
		if err != nil {
			return err
		}
		if has {
			return nil // All good.
		}
		log.Warnf("Archive clean-up: Purging unknown fingerprint %v in time-range index.", fp)
		deleted, err := p.archivedFingerprintToTimeRange.Delete(fp)
		if err != nil {
			return err
		}
		if !deleted {
			log.Errorf("Fingerprint %v to be deleted from archivedFingerprintToTimeRange not found. This should never happen.", fp)
		}
		return nil
	}); err != nil {
		return err
	}
	log.Info("Clean-up of archive indexes complete.")
	return nil
}
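
// rebuildLabelIndexes re-indexes all metrics, both the ones held in memory
// and the ones in the archive index. It only queues the indexing requests;
// the actual index updates happen asynchronously and may lag behind.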
func (p *persistence) rebuildLabelIndexes(
	fpToSeries map[model.Fingerprint]*memorySeries,
) error {
	count := 0
	log.Info("Rebuilding label indexes.")
	log.Info("Indexing metrics in memory.")
	for fp, s := range fpToSeries {
		p.indexMetric(fp, s.metric)
		count++
		if count%10000 == 0 {
			log.Infof("%d metrics queued for indexing.", count)
		}
	}
	log.Info("Indexing archived metrics.")
	var fp codable.Fingerprint
	var m codable.Metric
	if err := p.archivedFingerprintToMetrics.ForEach(func(kv index.KeyValueAccessor) error {
		if err := kv.Key(&fp); err != nil {
			return err
		}
		if err := kv.Value(&m); err != nil {
			return err
		}
		p.indexMetric(model.Fingerprint(fp), model.Metric(m))
		count++
		if count%10000 == 0 {
			log.Infof("%d metrics queued for indexing.", count)
		}
		return nil
	}); err != nil {
		return err
	}
	log.Info("All requests for rebuilding the label indexes queued. (Actual processing may lag behind.)")
	return nil
}

// maybeAddMapping adds a fingerprint mapping to fpm if the FastFingerprint of m is different from fp.
func maybeAddMapping(fp model.Fingerprint, m model.Metric, fpm fpMappings) {
	if rawFP := m.FastFingerprint(); rawFP != fp {
		log.Warnf(
			"Metric %v with fingerprint %v is mapped from raw fingerprint %v.",
			m, fp, rawFP,
		)
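		// fpm maps each raw (collision-prone) fingerprint to all the
		// fingerprints it has been mapped to, keyed by a unique string
		// representation of the respective metric.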
		if mappedFPs, ok := fpm[rawFP]; ok {
			mappedFPs[metricToUniqueString(m)] = fp
		} else {
			fpm[rawFP] = map[string]model.Fingerprint{
				metricToUniqueString(m): fp,
			}
		}
	}
}