Rename Address to URL

The "Address" field is actually a URL, which may contain a username and
password. Calling it an address is misleading, so we rename it to "URL".

Change-Id: I441c7ab9dfa2ceedc67cde7a47e6843a65f60511
This commit is contained in:
Johannes 'fish' Ziemke 2014-12-17 19:40:59 +01:00
parent c4bcfeccc5
commit ff95a52b0f
6 changed files with 75 additions and 75 deletions

View file

@ -122,13 +122,13 @@ type Target interface {
State() TargetState
// Return the last time a scrape was attempted.
LastScrape() time.Time
// The address to which the Target corresponds. Out of all of the available
// The URL to which the Target corresponds. Out of all of the available
// points in this interface, this one is the best candidate to change given
// the ways to express the endpoint.
Address() string
// The address as seen from other hosts. References to localhost are resolved
URL() string
// The URL as seen from other hosts. References to localhost are resolved
// to the address of the prometheus server.
GlobalAddress() string
GlobalURL() string
// Return the target's base labels.
BaseLabels() clientmodel.LabelSet
// SetBaseLabelsFrom queues a replacement of the current base labels by
@ -158,7 +158,7 @@ type target struct {
// Channel to queue base labels to be replaced.
newBaseLabels chan clientmodel.LabelSet
address string
url string
// What is the deadline for the HTTP or HTTPS against this endpoint.
Deadline time.Duration
// Any base labels that are added to this target and its metrics.
@ -175,9 +175,9 @@ type target struct {
}
// NewTarget creates a reasonably configured target for querying.
func NewTarget(address string, deadline time.Duration, baseLabels clientmodel.LabelSet) Target {
func NewTarget(url string, deadline time.Duration, baseLabels clientmodel.LabelSet) Target {
target := &target{
address: address,
url: url,
Deadline: deadline,
baseLabels: baseLabels,
httpClient: utility.NewDeadlineClient(deadline),
@ -195,7 +195,7 @@ func (t *target) recordScrapeHealth(ingester extraction.Ingester, timestamp clie
metric[label] = value
}
metric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(ScrapeHealthMetricName)
metric[InstanceLabel] = clientmodel.LabelValue(t.Address())
metric[InstanceLabel] = clientmodel.LabelValue(t.URL())
healthValue := clientmodel.SampleValue(0)
if healthy {
@ -295,7 +295,7 @@ func (t *target) scrape(ingester extraction.Ingester) (err error) {
ms := float64(time.Since(start)) / float64(time.Millisecond)
labels := prometheus.Labels{
job: string(t.baseLabels[clientmodel.JobLabel]),
instance: t.Address(),
instance: t.URL(),
outcome: success,
}
t.Lock() // Writing t.state and t.lastError requires the lock.
@ -311,7 +311,7 @@ func (t *target) scrape(ingester extraction.Ingester) (err error) {
t.recordScrapeHealth(ingester, timestamp, err == nil)
}(time.Now())
req, err := http.NewRequest("GET", t.Address(), nil)
req, err := http.NewRequest("GET", t.URL(), nil)
if err != nil {
panic(err)
}
@ -331,7 +331,7 @@ func (t *target) scrape(ingester extraction.Ingester) (err error) {
return err
}
baseLabels := clientmodel.LabelSet{InstanceLabel: clientmodel.LabelValue(t.Address())}
baseLabels := clientmodel.LabelSet{InstanceLabel: clientmodel.LabelValue(t.URL())}
for baseLabel, baseValue := range t.baseLabels {
baseLabels[baseLabel] = baseValue
}
@ -369,23 +369,23 @@ func (t *target) LastScrape() time.Time {
return t.lastScrape
}
// Address implements Target.
func (t *target) Address() string {
return t.address
// URL implements Target.
func (t *target) URL() string {
return t.url
}
// GlobalAddress implements Target.
func (t *target) GlobalAddress() string {
address := t.address
// GlobalURL implements Target.
func (t *target) GlobalURL() string {
url := t.url
hostname, err := os.Hostname()
if err != nil {
glog.Warningf("Couldn't get hostname: %s, returning target.Address()", err)
return address
glog.Warningf("Couldn't get hostname: %s, returning target.URL()", err)
return url
}
for _, localhostRepresentation := range localhostRepresentations {
address = strings.Replace(address, localhostRepresentation, fmt.Sprintf("http://%s", hostname), -1)
url = strings.Replace(url, localhostRepresentation, fmt.Sprintf("http://%s", hostname), -1)
}
return address
return url
}
// BaseLabels implements Target.
@ -397,7 +397,7 @@ func (t *target) BaseLabels() clientmodel.LabelSet {
// SetBaseLabelsFrom implements Target.
func (t *target) SetBaseLabelsFrom(newTarget Target) {
if t.Address() != newTarget.Address() {
if t.URL() != newTarget.URL() {
panic("targets don't refer to the same endpoint")
}
t.newBaseLabels <- newTarget.BaseLabels()

View file

@ -39,7 +39,7 @@ func (i *collectResultIngester) Ingest(r *extraction.Result) error {
func TestTargetScrapeUpdatesState(t *testing.T) {
testTarget := target{
state: Unknown,
address: "bad schema",
url: "bad schema",
httpClient: utility.NewDeadlineClient(0),
}
testTarget.scrape(nopIngester{})
@ -50,7 +50,7 @@ func TestTargetScrapeUpdatesState(t *testing.T) {
func TestTargetRecordScrapeHealth(t *testing.T) {
testTarget := target{
address: "http://example.url",
url: "http://example.url",
baseLabels: clientmodel.LabelSet{clientmodel.JobLabel: "testjob"},
httpClient: utility.NewDeadlineClient(0),
}
@ -147,7 +147,7 @@ func TestTargetScrape404(t *testing.T) {
func TestTargetRunScraperScrapes(t *testing.T) {
testTarget := target{
state: Unknown,
address: "bad schema",
url: "bad schema",
httpClient: utility.NewDeadlineClient(0),
scraperStopping: make(chan struct{}),
scraperStopped: make(chan struct{}),

View file

@ -38,12 +38,12 @@ func (t fakeTarget) LastError() error {
return nil
}
func (t fakeTarget) Address() string {
func (t fakeTarget) URL() string {
return "fake"
}
func (t fakeTarget) GlobalAddress() string {
return t.Address()
func (t fakeTarget) GlobalURL() string {
return t.URL()
}
func (t fakeTarget) BaseLabels() clientmodel.LabelSet {

View file

@ -31,11 +31,11 @@ const (
type TargetPool struct {
sync.RWMutex
manager TargetManager
targetsByAddress map[string]Target
interval time.Duration
ingester extraction.Ingester
addTargetQueue chan Target
manager TargetManager
targetsByURL map[string]Target
interval time.Duration
ingester extraction.Ingester
addTargetQueue chan Target
targetProvider TargetProvider
@ -45,14 +45,14 @@ type TargetPool struct {
// NewTargetPool creates a TargetPool, ready to be started by calling Run.
func NewTargetPool(m TargetManager, p TargetProvider, ing extraction.Ingester, i time.Duration) *TargetPool {
return &TargetPool{
manager: m,
interval: i,
ingester: ing,
targetsByAddress: make(map[string]Target),
addTargetQueue: make(chan Target, targetAddQueueSize),
targetProvider: p,
stopping: make(chan struct{}),
stopped: make(chan struct{}),
manager: m,
interval: i,
ingester: ing,
targetsByURL: make(map[string]Target),
addTargetQueue: make(chan Target, targetAddQueueSize),
targetProvider: p,
stopping: make(chan struct{}),
stopped: make(chan struct{}),
}
}
@ -98,7 +98,7 @@ func (p *TargetPool) addTarget(target Target) {
p.Lock()
defer p.Unlock()
p.targetsByAddress[target.Address()] = target
p.targetsByURL[target.URL()] = target
go target.RunScraper(p.ingester, p.interval)
}
@ -109,21 +109,21 @@ func (p *TargetPool) ReplaceTargets(newTargets []Target) {
p.Lock()
defer p.Unlock()
newTargetAddresses := make(utility.Set)
newTargetURLs := make(utility.Set)
for _, newTarget := range newTargets {
newTargetAddresses.Add(newTarget.Address())
oldTarget, ok := p.targetsByAddress[newTarget.Address()]
newTargetURLs.Add(newTarget.URL())
oldTarget, ok := p.targetsByURL[newTarget.URL()]
if ok {
oldTarget.SetBaseLabelsFrom(newTarget)
} else {
p.targetsByAddress[newTarget.Address()] = newTarget
p.targetsByURL[newTarget.URL()] = newTarget
go newTarget.RunScraper(p.ingester, p.interval)
}
}
var wg sync.WaitGroup
for k, oldTarget := range p.targetsByAddress {
if !newTargetAddresses.Has(k) {
for k, oldTarget := range p.targetsByURL {
if !newTargetURLs.Has(k) {
wg.Add(1)
go func(k string, oldTarget Target) {
defer wg.Done()
@ -131,7 +131,7 @@ func (p *TargetPool) ReplaceTargets(newTargets []Target) {
oldTarget.StopScraper()
glog.V(1).Infof("Scraper for target %s stopped.", k)
}(k, oldTarget)
delete(p.targetsByAddress, k)
delete(p.targetsByURL, k)
}
}
wg.Wait()
@ -142,8 +142,8 @@ func (p *TargetPool) Targets() []Target {
p.RLock()
defer p.RUnlock()
targets := make([]Target, 0, len(p.targetsByAddress))
for _, v := range p.targetsByAddress {
targets := make([]Target, 0, len(p.targetsByURL))
for _, v := range p.targetsByURL {
targets = append(targets, v)
}
return targets

View file

@ -27,12 +27,12 @@ func testTargetPool(t testing.TB) {
}
type input struct {
address string
url string
scheduledFor time.Time
}
type output struct {
address string
url string
}
var scenarios = []struct {
@ -49,12 +49,12 @@ func testTargetPool(t testing.TB) {
name: "single element",
inputs: []input{
{
address: "single1",
url: "single1",
},
},
outputs: []output{
{
address: "single1",
url: "single1",
},
},
},
@ -62,18 +62,18 @@ func testTargetPool(t testing.TB) {
name: "plural schedules",
inputs: []input{
{
address: "plural1",
url: "plural1",
},
{
address: "plural2",
url: "plural2",
},
},
outputs: []output{
{
address: "plural1",
url: "plural1",
},
{
address: "plural2",
url: "plural2",
},
},
},
@ -84,24 +84,24 @@ func testTargetPool(t testing.TB) {
for _, input := range scenario.inputs {
target := target{
address: input.address,
url: input.url,
newBaseLabels: make(chan clientmodel.LabelSet, 1),
httpClient: &http.Client{},
}
pool.addTarget(&target)
}
if len(pool.targetsByAddress) != len(scenario.outputs) {
t.Errorf("%s %d. expected TargetPool size to be %d but was %d", scenario.name, i, len(scenario.outputs), len(pool.targetsByAddress))
if len(pool.targetsByURL) != len(scenario.outputs) {
t.Errorf("%s %d. expected TargetPool size to be %d but was %d", scenario.name, i, len(scenario.outputs), len(pool.targetsByURL))
} else {
for j, output := range scenario.outputs {
if target, ok := pool.targetsByAddress[output.address]; !ok {
t.Errorf("%s %d.%d. expected Target address to be %s but was %s", scenario.name, i, j, output.address, target.Address())
if target, ok := pool.targetsByURL[output.url]; !ok {
t.Errorf("%s %d.%d. expected Target url to be %s but was %s", scenario.name, i, j, output.url, target.URL())
}
}
if len(pool.targetsByAddress) != len(scenario.outputs) {
t.Errorf("%s %d. expected to repopulated with %d elements, got %d", scenario.name, i, len(scenario.outputs), len(pool.targetsByAddress))
if len(pool.targetsByURL) != len(scenario.outputs) {
t.Errorf("%s %d. expected to repopulated with %d elements, got %d", scenario.name, i, len(scenario.outputs), len(pool.targetsByURL))
}
}
}
@ -114,7 +114,7 @@ func TestTargetPool(t *testing.T) {
func TestTargetPoolReplaceTargets(t *testing.T) {
pool := NewTargetPool(nil, nil, nopIngester{}, time.Duration(1))
oldTarget1 := &target{
address: "example1",
url: "example1",
state: Unreachable,
scraperStopping: make(chan struct{}),
scraperStopped: make(chan struct{}),
@ -122,7 +122,7 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
httpClient: &http.Client{},
}
oldTarget2 := &target{
address: "example2",
url: "example2",
state: Unreachable,
scraperStopping: make(chan struct{}),
scraperStopped: make(chan struct{}),
@ -130,7 +130,7 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
httpClient: &http.Client{},
}
newTarget1 := &target{
address: "example1",
url: "example1",
state: Alive,
scraperStopping: make(chan struct{}),
scraperStopped: make(chan struct{}),
@ -138,7 +138,7 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
httpClient: &http.Client{},
}
newTarget2 := &target{
address: "example3",
url: "example3",
state: Alive,
scraperStopping: make(chan struct{}),
scraperStopped: make(chan struct{}),
@ -151,14 +151,14 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
pool.ReplaceTargets([]Target{newTarget1, newTarget2})
if len(pool.targetsByAddress) != 2 {
t.Errorf("Expected 2 elements in pool, had %d", len(pool.targetsByAddress))
if len(pool.targetsByURL) != 2 {
t.Errorf("Expected 2 elements in pool, had %d", len(pool.targetsByURL))
}
if pool.targetsByAddress["example1"].State() != oldTarget1.State() {
if pool.targetsByURL["example1"].State() != oldTarget1.State() {
t.Errorf("target1 channel has changed")
}
if pool.targetsByAddress["example3"].State() == oldTarget2.State() {
if pool.targetsByURL["example3"].State() == oldTarget2.State() {
t.Errorf("newTarget2 channel same as oldTarget2's")
}

View file

@ -47,7 +47,7 @@
{{range $pool.Targets}}
<tr>
<td>
<a href="{{.GlobalAddress}}">{{.Address}}</a>
<a href="{{.GlobalURL}}">{{.URL}}</a>
</td>
<td>
{{.State}}