Rename Address to URL

The "Address" is actually a URL, which may contain a username and
password. Calling this "Address" is misleading, so we rename it.

Change-Id: I441c7ab9dfa2ceedc67cde7a47e6843a65f60511
This commit is contained in:
Johannes 'fish' Ziemke 2014-12-17 19:40:59 +01:00
parent c4bcfeccc5
commit ff95a52b0f
6 changed files with 75 additions and 75 deletions

View file

@ -122,13 +122,13 @@ type Target interface {
State() TargetState State() TargetState
// Return the last time a scrape was attempted. // Return the last time a scrape was attempted.
LastScrape() time.Time LastScrape() time.Time
// The address to which the Target corresponds. Out of all of the available // The URL to which the Target corresponds. Out of all of the available
// points in this interface, this one is the best candidate to change given // points in this interface, this one is the best candidate to change given
// the ways to express the endpoint. // the ways to express the endpoint.
Address() string URL() string
// The address as seen from other hosts. References to localhost are resolved // The URL as seen from other hosts. References to localhost are resolved
// to the address of the prometheus server. // to the address of the prometheus server.
GlobalAddress() string GlobalURL() string
// Return the target's base labels. // Return the target's base labels.
BaseLabels() clientmodel.LabelSet BaseLabels() clientmodel.LabelSet
// SetBaseLabelsFrom queues a replacement of the current base labels by // SetBaseLabelsFrom queues a replacement of the current base labels by
@ -158,7 +158,7 @@ type target struct {
// Channel to queue base labels to be replaced. // Channel to queue base labels to be replaced.
newBaseLabels chan clientmodel.LabelSet newBaseLabels chan clientmodel.LabelSet
address string url string
// What is the deadline for the HTTP or HTTPS against this endpoint. // What is the deadline for the HTTP or HTTPS against this endpoint.
Deadline time.Duration Deadline time.Duration
// Any base labels that are added to this target and its metrics. // Any base labels that are added to this target and its metrics.
@ -175,9 +175,9 @@ type target struct {
} }
// NewTarget creates a reasonably configured target for querying. // NewTarget creates a reasonably configured target for querying.
func NewTarget(address string, deadline time.Duration, baseLabels clientmodel.LabelSet) Target { func NewTarget(url string, deadline time.Duration, baseLabels clientmodel.LabelSet) Target {
target := &target{ target := &target{
address: address, url: url,
Deadline: deadline, Deadline: deadline,
baseLabels: baseLabels, baseLabels: baseLabels,
httpClient: utility.NewDeadlineClient(deadline), httpClient: utility.NewDeadlineClient(deadline),
@ -195,7 +195,7 @@ func (t *target) recordScrapeHealth(ingester extraction.Ingester, timestamp clie
metric[label] = value metric[label] = value
} }
metric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(ScrapeHealthMetricName) metric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(ScrapeHealthMetricName)
metric[InstanceLabel] = clientmodel.LabelValue(t.Address()) metric[InstanceLabel] = clientmodel.LabelValue(t.URL())
healthValue := clientmodel.SampleValue(0) healthValue := clientmodel.SampleValue(0)
if healthy { if healthy {
@ -295,7 +295,7 @@ func (t *target) scrape(ingester extraction.Ingester) (err error) {
ms := float64(time.Since(start)) / float64(time.Millisecond) ms := float64(time.Since(start)) / float64(time.Millisecond)
labels := prometheus.Labels{ labels := prometheus.Labels{
job: string(t.baseLabels[clientmodel.JobLabel]), job: string(t.baseLabels[clientmodel.JobLabel]),
instance: t.Address(), instance: t.URL(),
outcome: success, outcome: success,
} }
t.Lock() // Writing t.state and t.lastError requires the lock. t.Lock() // Writing t.state and t.lastError requires the lock.
@ -311,7 +311,7 @@ func (t *target) scrape(ingester extraction.Ingester) (err error) {
t.recordScrapeHealth(ingester, timestamp, err == nil) t.recordScrapeHealth(ingester, timestamp, err == nil)
}(time.Now()) }(time.Now())
req, err := http.NewRequest("GET", t.Address(), nil) req, err := http.NewRequest("GET", t.URL(), nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -331,7 +331,7 @@ func (t *target) scrape(ingester extraction.Ingester) (err error) {
return err return err
} }
baseLabels := clientmodel.LabelSet{InstanceLabel: clientmodel.LabelValue(t.Address())} baseLabels := clientmodel.LabelSet{InstanceLabel: clientmodel.LabelValue(t.URL())}
for baseLabel, baseValue := range t.baseLabels { for baseLabel, baseValue := range t.baseLabels {
baseLabels[baseLabel] = baseValue baseLabels[baseLabel] = baseValue
} }
@ -369,23 +369,23 @@ func (t *target) LastScrape() time.Time {
return t.lastScrape return t.lastScrape
} }
// Address implements Target. // URL implements Target.
func (t *target) Address() string { func (t *target) URL() string {
return t.address return t.url
} }
// GlobalAddress implements Target. // GlobalURL implements Target.
func (t *target) GlobalAddress() string { func (t *target) GlobalURL() string {
address := t.address url := t.url
hostname, err := os.Hostname() hostname, err := os.Hostname()
if err != nil { if err != nil {
glog.Warningf("Couldn't get hostname: %s, returning target.Address()", err) glog.Warningf("Couldn't get hostname: %s, returning target.URL()", err)
return address return url
} }
for _, localhostRepresentation := range localhostRepresentations { for _, localhostRepresentation := range localhostRepresentations {
address = strings.Replace(address, localhostRepresentation, fmt.Sprintf("http://%s", hostname), -1) url = strings.Replace(url, localhostRepresentation, fmt.Sprintf("http://%s", hostname), -1)
} }
return address return url
} }
// BaseLabels implements Target. // BaseLabels implements Target.
@ -397,7 +397,7 @@ func (t *target) BaseLabels() clientmodel.LabelSet {
// SetBaseLabelsFrom implements Target. // SetBaseLabelsFrom implements Target.
func (t *target) SetBaseLabelsFrom(newTarget Target) { func (t *target) SetBaseLabelsFrom(newTarget Target) {
if t.Address() != newTarget.Address() { if t.URL() != newTarget.URL() {
panic("targets don't refer to the same endpoint") panic("targets don't refer to the same endpoint")
} }
t.newBaseLabels <- newTarget.BaseLabels() t.newBaseLabels <- newTarget.BaseLabels()

View file

@ -39,7 +39,7 @@ func (i *collectResultIngester) Ingest(r *extraction.Result) error {
func TestTargetScrapeUpdatesState(t *testing.T) { func TestTargetScrapeUpdatesState(t *testing.T) {
testTarget := target{ testTarget := target{
state: Unknown, state: Unknown,
address: "bad schema", url: "bad schema",
httpClient: utility.NewDeadlineClient(0), httpClient: utility.NewDeadlineClient(0),
} }
testTarget.scrape(nopIngester{}) testTarget.scrape(nopIngester{})
@ -50,7 +50,7 @@ func TestTargetScrapeUpdatesState(t *testing.T) {
func TestTargetRecordScrapeHealth(t *testing.T) { func TestTargetRecordScrapeHealth(t *testing.T) {
testTarget := target{ testTarget := target{
address: "http://example.url", url: "http://example.url",
baseLabels: clientmodel.LabelSet{clientmodel.JobLabel: "testjob"}, baseLabels: clientmodel.LabelSet{clientmodel.JobLabel: "testjob"},
httpClient: utility.NewDeadlineClient(0), httpClient: utility.NewDeadlineClient(0),
} }
@ -147,7 +147,7 @@ func TestTargetScrape404(t *testing.T) {
func TestTargetRunScraperScrapes(t *testing.T) { func TestTargetRunScraperScrapes(t *testing.T) {
testTarget := target{ testTarget := target{
state: Unknown, state: Unknown,
address: "bad schema", url: "bad schema",
httpClient: utility.NewDeadlineClient(0), httpClient: utility.NewDeadlineClient(0),
scraperStopping: make(chan struct{}), scraperStopping: make(chan struct{}),
scraperStopped: make(chan struct{}), scraperStopped: make(chan struct{}),

View file

@ -38,12 +38,12 @@ func (t fakeTarget) LastError() error {
return nil return nil
} }
func (t fakeTarget) Address() string { func (t fakeTarget) URL() string {
return "fake" return "fake"
} }
func (t fakeTarget) GlobalAddress() string { func (t fakeTarget) GlobalURL() string {
return t.Address() return t.URL()
} }
func (t fakeTarget) BaseLabels() clientmodel.LabelSet { func (t fakeTarget) BaseLabels() clientmodel.LabelSet {

View file

@ -31,11 +31,11 @@ const (
type TargetPool struct { type TargetPool struct {
sync.RWMutex sync.RWMutex
manager TargetManager manager TargetManager
targetsByAddress map[string]Target targetsByURL map[string]Target
interval time.Duration interval time.Duration
ingester extraction.Ingester ingester extraction.Ingester
addTargetQueue chan Target addTargetQueue chan Target
targetProvider TargetProvider targetProvider TargetProvider
@ -45,14 +45,14 @@ type TargetPool struct {
// NewTargetPool creates a TargetPool, ready to be started by calling Run. // NewTargetPool creates a TargetPool, ready to be started by calling Run.
func NewTargetPool(m TargetManager, p TargetProvider, ing extraction.Ingester, i time.Duration) *TargetPool { func NewTargetPool(m TargetManager, p TargetProvider, ing extraction.Ingester, i time.Duration) *TargetPool {
return &TargetPool{ return &TargetPool{
manager: m, manager: m,
interval: i, interval: i,
ingester: ing, ingester: ing,
targetsByAddress: make(map[string]Target), targetsByURL: make(map[string]Target),
addTargetQueue: make(chan Target, targetAddQueueSize), addTargetQueue: make(chan Target, targetAddQueueSize),
targetProvider: p, targetProvider: p,
stopping: make(chan struct{}), stopping: make(chan struct{}),
stopped: make(chan struct{}), stopped: make(chan struct{}),
} }
} }
@ -98,7 +98,7 @@ func (p *TargetPool) addTarget(target Target) {
p.Lock() p.Lock()
defer p.Unlock() defer p.Unlock()
p.targetsByAddress[target.Address()] = target p.targetsByURL[target.URL()] = target
go target.RunScraper(p.ingester, p.interval) go target.RunScraper(p.ingester, p.interval)
} }
@ -109,21 +109,21 @@ func (p *TargetPool) ReplaceTargets(newTargets []Target) {
p.Lock() p.Lock()
defer p.Unlock() defer p.Unlock()
newTargetAddresses := make(utility.Set) newTargetURLs := make(utility.Set)
for _, newTarget := range newTargets { for _, newTarget := range newTargets {
newTargetAddresses.Add(newTarget.Address()) newTargetURLs.Add(newTarget.URL())
oldTarget, ok := p.targetsByAddress[newTarget.Address()] oldTarget, ok := p.targetsByURL[newTarget.URL()]
if ok { if ok {
oldTarget.SetBaseLabelsFrom(newTarget) oldTarget.SetBaseLabelsFrom(newTarget)
} else { } else {
p.targetsByAddress[newTarget.Address()] = newTarget p.targetsByURL[newTarget.URL()] = newTarget
go newTarget.RunScraper(p.ingester, p.interval) go newTarget.RunScraper(p.ingester, p.interval)
} }
} }
var wg sync.WaitGroup var wg sync.WaitGroup
for k, oldTarget := range p.targetsByAddress { for k, oldTarget := range p.targetsByURL {
if !newTargetAddresses.Has(k) { if !newTargetURLs.Has(k) {
wg.Add(1) wg.Add(1)
go func(k string, oldTarget Target) { go func(k string, oldTarget Target) {
defer wg.Done() defer wg.Done()
@ -131,7 +131,7 @@ func (p *TargetPool) ReplaceTargets(newTargets []Target) {
oldTarget.StopScraper() oldTarget.StopScraper()
glog.V(1).Infof("Scraper for target %s stopped.", k) glog.V(1).Infof("Scraper for target %s stopped.", k)
}(k, oldTarget) }(k, oldTarget)
delete(p.targetsByAddress, k) delete(p.targetsByURL, k)
} }
} }
wg.Wait() wg.Wait()
@ -142,8 +142,8 @@ func (p *TargetPool) Targets() []Target {
p.RLock() p.RLock()
defer p.RUnlock() defer p.RUnlock()
targets := make([]Target, 0, len(p.targetsByAddress)) targets := make([]Target, 0, len(p.targetsByURL))
for _, v := range p.targetsByAddress { for _, v := range p.targetsByURL {
targets = append(targets, v) targets = append(targets, v)
} }
return targets return targets

View file

@ -27,12 +27,12 @@ func testTargetPool(t testing.TB) {
} }
type input struct { type input struct {
address string url string
scheduledFor time.Time scheduledFor time.Time
} }
type output struct { type output struct {
address string url string
} }
var scenarios = []struct { var scenarios = []struct {
@ -49,12 +49,12 @@ func testTargetPool(t testing.TB) {
name: "single element", name: "single element",
inputs: []input{ inputs: []input{
{ {
address: "single1", url: "single1",
}, },
}, },
outputs: []output{ outputs: []output{
{ {
address: "single1", url: "single1",
}, },
}, },
}, },
@ -62,18 +62,18 @@ func testTargetPool(t testing.TB) {
name: "plural schedules", name: "plural schedules",
inputs: []input{ inputs: []input{
{ {
address: "plural1", url: "plural1",
}, },
{ {
address: "plural2", url: "plural2",
}, },
}, },
outputs: []output{ outputs: []output{
{ {
address: "plural1", url: "plural1",
}, },
{ {
address: "plural2", url: "plural2",
}, },
}, },
}, },
@ -84,24 +84,24 @@ func testTargetPool(t testing.TB) {
for _, input := range scenario.inputs { for _, input := range scenario.inputs {
target := target{ target := target{
address: input.address, url: input.url,
newBaseLabels: make(chan clientmodel.LabelSet, 1), newBaseLabels: make(chan clientmodel.LabelSet, 1),
httpClient: &http.Client{}, httpClient: &http.Client{},
} }
pool.addTarget(&target) pool.addTarget(&target)
} }
if len(pool.targetsByAddress) != len(scenario.outputs) { if len(pool.targetsByURL) != len(scenario.outputs) {
t.Errorf("%s %d. expected TargetPool size to be %d but was %d", scenario.name, i, len(scenario.outputs), len(pool.targetsByAddress)) t.Errorf("%s %d. expected TargetPool size to be %d but was %d", scenario.name, i, len(scenario.outputs), len(pool.targetsByURL))
} else { } else {
for j, output := range scenario.outputs { for j, output := range scenario.outputs {
if target, ok := pool.targetsByAddress[output.address]; !ok { if target, ok := pool.targetsByURL[output.url]; !ok {
t.Errorf("%s %d.%d. expected Target address to be %s but was %s", scenario.name, i, j, output.address, target.Address()) t.Errorf("%s %d.%d. expected Target url to be %s but was %s", scenario.name, i, j, output.url, target.URL())
} }
} }
if len(pool.targetsByAddress) != len(scenario.outputs) { if len(pool.targetsByURL) != len(scenario.outputs) {
t.Errorf("%s %d. expected to repopulated with %d elements, got %d", scenario.name, i, len(scenario.outputs), len(pool.targetsByAddress)) t.Errorf("%s %d. expected to repopulated with %d elements, got %d", scenario.name, i, len(scenario.outputs), len(pool.targetsByURL))
} }
} }
} }
@ -114,7 +114,7 @@ func TestTargetPool(t *testing.T) {
func TestTargetPoolReplaceTargets(t *testing.T) { func TestTargetPoolReplaceTargets(t *testing.T) {
pool := NewTargetPool(nil, nil, nopIngester{}, time.Duration(1)) pool := NewTargetPool(nil, nil, nopIngester{}, time.Duration(1))
oldTarget1 := &target{ oldTarget1 := &target{
address: "example1", url: "example1",
state: Unreachable, state: Unreachable,
scraperStopping: make(chan struct{}), scraperStopping: make(chan struct{}),
scraperStopped: make(chan struct{}), scraperStopped: make(chan struct{}),
@ -122,7 +122,7 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
httpClient: &http.Client{}, httpClient: &http.Client{},
} }
oldTarget2 := &target{ oldTarget2 := &target{
address: "example2", url: "example2",
state: Unreachable, state: Unreachable,
scraperStopping: make(chan struct{}), scraperStopping: make(chan struct{}),
scraperStopped: make(chan struct{}), scraperStopped: make(chan struct{}),
@ -130,7 +130,7 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
httpClient: &http.Client{}, httpClient: &http.Client{},
} }
newTarget1 := &target{ newTarget1 := &target{
address: "example1", url: "example1",
state: Alive, state: Alive,
scraperStopping: make(chan struct{}), scraperStopping: make(chan struct{}),
scraperStopped: make(chan struct{}), scraperStopped: make(chan struct{}),
@ -138,7 +138,7 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
httpClient: &http.Client{}, httpClient: &http.Client{},
} }
newTarget2 := &target{ newTarget2 := &target{
address: "example3", url: "example3",
state: Alive, state: Alive,
scraperStopping: make(chan struct{}), scraperStopping: make(chan struct{}),
scraperStopped: make(chan struct{}), scraperStopped: make(chan struct{}),
@ -151,14 +151,14 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
pool.ReplaceTargets([]Target{newTarget1, newTarget2}) pool.ReplaceTargets([]Target{newTarget1, newTarget2})
if len(pool.targetsByAddress) != 2 { if len(pool.targetsByURL) != 2 {
t.Errorf("Expected 2 elements in pool, had %d", len(pool.targetsByAddress)) t.Errorf("Expected 2 elements in pool, had %d", len(pool.targetsByURL))
} }
if pool.targetsByAddress["example1"].State() != oldTarget1.State() { if pool.targetsByURL["example1"].State() != oldTarget1.State() {
t.Errorf("target1 channel has changed") t.Errorf("target1 channel has changed")
} }
if pool.targetsByAddress["example3"].State() == oldTarget2.State() { if pool.targetsByURL["example3"].State() == oldTarget2.State() {
t.Errorf("newTarget2 channel same as oldTarget2's") t.Errorf("newTarget2 channel same as oldTarget2's")
} }

View file

@ -47,7 +47,7 @@
{{range $pool.Targets}} {{range $pool.Targets}}
<tr> <tr>
<td> <td>
<a href="{{.GlobalAddress}}">{{.Address}}</a> <a href="{{.GlobalURL}}">{{.URL}}</a>
</td> </td>
<td> <td>
{{.State}} {{.State}}