Mirror of https://github.com/google/nomulus, synced 2026-02-09 06:20:29 +00:00

Compare commits (4 commits): nomulus-20 ... nomulus-20

| Author | SHA1 | Date |
|---|---|---|
|  | b27b077638 |  |
|  | 0e8cd75a58 |  |
|  | 2a1748ba9c |  |
|  | f4889191a4 |  |
@@ -23,20 +23,21 @@ import static google.registry.model.EppResourceUtils.isLinked;
import static google.registry.persistence.transaction.TransactionManagerFactory.replicaTm;
import static google.registry.util.CollectionUtils.union;

import com.github.benmanes.caffeine.cache.LoadingCache;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSetMultimap;
import com.google.common.collect.Maps;
import com.google.common.collect.Ordering;
import com.google.common.collect.Sets;
import com.google.common.collect.Streams;
import com.google.common.flogger.FluentLogger;
import com.google.common.net.InetAddresses;
import com.google.gson.JsonArray;
import google.registry.config.RegistryConfig;
import google.registry.config.RegistryConfig.Config;
import google.registry.model.CacheUtils;
import google.registry.model.EppResource;
import google.registry.model.adapters.EnumToAttributeAdapter.EppEnum;
import google.registry.model.contact.Contact;

@@ -73,13 +74,11 @@ import google.registry.rdap.RdapObjectClasses.VcardArray;
import google.registry.request.RequestServerName;
import google.registry.util.Clock;
import jakarta.inject.Inject;
import jakarta.persistence.Entity;
import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.URI;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Locale;
import java.util.Optional;
import java.util.Set;
@@ -103,6 +102,16 @@ public class RdapJsonFormatter {

  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  @VisibleForTesting
  record HistoryTimeAndRegistrar(DateTime modificationTime, String registrarId) {}

  private static final LoadingCache<String, ImmutableMap<EventAction, HistoryTimeAndRegistrar>>
      DOMAIN_HISTORIES_BY_REPO_ID =
          CacheUtils.newCacheBuilder(RegistryConfig.getEppResourceCachingDuration())
              // Cache more than the EPP resource cache because we're only caching small objects
              .maximumSize(RegistryConfig.getEppResourceMaxCachedEntries() * 4L)
              .build(repoId -> getLastHistoryByType(repoId, Domain.class));

  private DateTime requestTime = null;

  @Inject

@@ -860,8 +869,18 @@
  }

  @VisibleForTesting
  ImmutableMap<EventAction, HistoryEntry> getLastHistoryEntryByType(EppResource resource) {
    HashMap<EventAction, HistoryEntry> lastEntryOfType = Maps.newHashMap();
  static ImmutableMap<EventAction, HistoryTimeAndRegistrar> getLastHistoryByType(
      EppResource eppResource) {
    if (eppResource instanceof Domain) {
      return DOMAIN_HISTORIES_BY_REPO_ID.get(eppResource.getRepoId());
    }
    return getLastHistoryByType(eppResource.getRepoId(), eppResource.getClass());
  }

  private static ImmutableMap<EventAction, HistoryTimeAndRegistrar> getLastHistoryByType(
      String repoId, Class<? extends EppResource> resourceType) {
    ImmutableMap.Builder<EventAction, HistoryTimeAndRegistrar> lastEntryOfType =
        new ImmutableMap.Builder<>();
    // Events (such as transfer, but also create) can appear multiple times. We only want the last
    // time they appeared.
    //
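The new `DOMAIN_HISTORIES_BY_REPO_ID` field follows the Caffeine `LoadingCache` pattern: the value for a repo ID is computed once by the loader lambda and then served from memory on later RDAP requests. Below is a minimal sketch of that pattern using the Caffeine API directly rather than Nomulus's `CacheUtils` wrapper; the TTL, maximum size, and loader body are illustrative assumptions, not the real configuration values.

```java
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import java.time.Duration;
import java.util.Map;

public class HistoryCacheSketch {
  // Hypothetical stand-in for the per-domain map the real loader builds.
  record HistoryTimeAndRegistrar(String modificationTime, String registrarId) {}

  // A LoadingCache computes and stores the value on first access to a key, so repeated
  // lookups for the same domain repo ID hit memory instead of running the SQL query again.
  private static final LoadingCache<String, Map<String, HistoryTimeAndRegistrar>> CACHE =
      Caffeine.newBuilder()
          .expireAfterWrite(Duration.ofMinutes(10)) // illustrative TTL, not the real config value
          .maximumSize(4_000)                       // illustrative size, not the real config value
          .build(HistoryCacheSketch::loadHistories); // loader invoked only on cache miss

  private static Map<String, HistoryTimeAndRegistrar> loadHistories(String repoId) {
    // In the real code this is where the JPQL query against the read replica runs.
    return Map.of("transfer", new HistoryTimeAndRegistrar("1999-12-01T00:00:00Z", "TheRegistrar"));
  }

  public static void main(String[] args) {
    System.out.println(CACHE.get("2-EXAMPLE")); // first call loads; later calls are cached
  }
}
```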
@@ -873,35 +892,33 @@
    // 2.3.2.3 An event of *eventAction* type *transfer*, with the last date and time that the
    // domain was transferred. The event of *eventAction* type *transfer* MUST be omitted if the
    // domain name has not been transferred since it was created.
    VKey<? extends EppResource> resourceVkey = resource.createVKey();
    Class<? extends HistoryEntry> historyClass =
        HistoryEntryDao.getHistoryClassFromParent(resourceVkey.getKind());
    String entityName = historyClass.getAnnotation(Entity.class).name();
    if (Strings.isNullOrEmpty(entityName)) {
      entityName = historyClass.getSimpleName();
    }
    String entityName = HistoryEntryDao.getHistoryClassFromParent(resourceType).getSimpleName();
    String jpql =
        GET_LAST_HISTORY_BY_TYPE_JPQL_TEMPLATE
            .replace("%entityName%", entityName)
            .replace("%repoIdValue%", resourceVkey.getKey().toString());
    Iterable<HistoryEntry> historyEntries =
        replicaTm()
            .transact(
                () ->
                    replicaTm()
                        .getEntityManager()
                        .createQuery(jpql, HistoryEntry.class)
                        .getResultList());
    for (HistoryEntry historyEntry : historyEntries) {
      EventAction rdapEventAction =
          HISTORY_ENTRY_TYPE_TO_RDAP_EVENT_ACTION_MAP.get(historyEntry.getType());
      // Only save the historyEntries if this is a type we care about.
      if (rdapEventAction == null) {
        continue;
      }
      lastEntryOfType.put(rdapEventAction, historyEntry);
    }
    return ImmutableMap.copyOf(lastEntryOfType);
            .replace("%repoIdValue%", repoId);
    replicaTm()
        .transact(
            () ->
                replicaTm()
                    .getEntityManager()
                    .createQuery(jpql, HistoryEntry.class)
                    .getResultStream()
                    .forEach(
                        historyEntry -> {
                          EventAction rdapEventAction =
                              HISTORY_ENTRY_TYPE_TO_RDAP_EVENT_ACTION_MAP.get(
                                  historyEntry.getType());
                          // Only save the entries if this is a type we care about.
                          if (rdapEventAction != null) {
                            lastEntryOfType.put(
                                rdapEventAction,
                                new HistoryTimeAndRegistrar(
                                    historyEntry.getModificationTime(),
                                    historyEntry.getRegistrarId()));
                          }
                        }));
    return lastEntryOfType.buildKeepingLast();
  }

  /**
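The rewritten loader replaces the explicit HashMap with an `ImmutableMap.Builder` and `buildKeepingLast()`, so when the same `EventAction` shows up in several history rows, only the last row streamed for that action survives (matching the "we only want the last time they appeared" comment; the ordering itself comes from the JPQL query, which is not shown in this hunk). A small sketch of that Guava behavior, with illustrative keys and values:

```java
import com.google.common.collect.ImmutableMap;

public class BuildKeepingLastSketch {
  public static void main(String[] args) {
    ImmutableMap.Builder<String, String> builder = new ImmutableMap.Builder<>();
    // Duplicate keys are allowed while building...
    builder.put("transfer", "1998-04-10T00:00:00Z");
    builder.put("transfer", "1999-12-01T00:00:00Z"); // added later, so it wins
    builder.put("registration", "1997-01-01T00:00:00Z");
    // ...and buildKeepingLast() resolves each duplicate key to the value added last,
    // whereas builder.build() would throw IllegalArgumentException on duplicates.
    System.out.println(builder.buildKeepingLast());
    // {transfer=1999-12-01T00:00:00Z, registration=1997-01-01T00:00:00Z}
  }
}
```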
@@ -915,7 +932,8 @@
   * that we don't need to load HistoryEntries for "summary" responses).
   */
  private ImmutableList<Event> makeOptionalEvents(EppResource resource) {
    ImmutableMap<EventAction, HistoryEntry> lastEntryOfType = getLastHistoryEntryByType(resource);
    ImmutableMap<EventAction, HistoryTimeAndRegistrar> lastHistoryOfType =
        getLastHistoryByType(resource);
    ImmutableList.Builder<Event> eventsBuilder = new ImmutableList.Builder<>();
    DateTime creationTime = resource.getCreationTime();
    DateTime lastChangeTime =

@@ -923,12 +941,12 @@
    // The order of the elements is stable - it's the order in which the enum elements are defined
    // in EventAction
    for (EventAction rdapEventAction : EventAction.values()) {
      HistoryEntry historyEntry = lastEntryOfType.get(rdapEventAction);
      HistoryTimeAndRegistrar historyTimeAndRegistrar = lastHistoryOfType.get(rdapEventAction);
      // Check if there was any entry of this type
      if (historyEntry == null) {
      if (historyTimeAndRegistrar == null) {
        continue;
      }
      DateTime modificationTime = historyEntry.getModificationTime();
      DateTime modificationTime = historyTimeAndRegistrar.modificationTime();
      // We will ignore all events that happened before the "creation time", since these events are
      // from a "previous incarnation of the domain" (for a domain that was owned by someone,
      // deleted, and then bought by someone else)

@@ -938,7 +956,7 @@
      eventsBuilder.add(
          Event.builder()
              .setEventAction(rdapEventAction)
              .setEventActor(historyEntry.getRegistrarId())
              .setEventActor(historyTimeAndRegistrar.registrarId())
              .setEventDate(modificationTime)
              .build());
      // The last change time might not be the lastEppUpdateTime, since some changes happen without

@@ -951,21 +969,16 @@
    // The event of eventAction type last changed MUST be omitted if the domain name has not been
    // updated since it was created
    if (lastChangeTime.isAfter(creationTime)) {
      eventsBuilder.add(makeEvent(EventAction.LAST_CHANGED, null, lastChangeTime));
      // Creates an RDAP event object as defined by RFC 9083
      eventsBuilder.add(
          Event.builder()
              .setEventAction(EventAction.LAST_CHANGED)
              .setEventDate(lastChangeTime)
              .build());
    }
    return eventsBuilder.build();
  }

  /** Creates an RDAP event object as defined by RFC 9083. */
  private static Event makeEvent(
      EventAction eventAction, @Nullable String eventActor, DateTime eventDate) {
    Event.Builder builder = Event.builder().setEventAction(eventAction).setEventDate(eventDate);
    if (eventActor != null) {
      builder.setEventActor(eventActor);
    }
    return builder.build();
  }

  /**
   * Creates a vCard address entry: array of strings specifying the components of the address.
   *
@@ -462,12 +462,12 @@ class RdapJsonFormatterTest {
  }

  @Test
  void testGetLastHistoryEntryByType() {
  void testGetLastHistoryByType() {
    // Expected data are from "rdapjson_domain_summary.json"
    assertThat(
            Maps.transformValues(
                rdapJsonFormatter.getLastHistoryEntryByType(domainFull),
                HistoryEntry::getModificationTime))
                RdapJsonFormatter.getLastHistoryByType(domainFull),
                RdapJsonFormatter.HistoryTimeAndRegistrar::modificationTime))
        .containsExactlyEntriesIn(
            ImmutableMap.of(TRANSFER, DateTime.parse("1999-12-01T00:00:00.000Z")));
  }
@@ -113,6 +113,8 @@ public class ProxyConfig {
    public int stackdriverMaxQps;
    public int stackdriverMaxPointsPerRequest;
    public int writeIntervalSeconds;
    public double frontendMetricsRatio;
    public double backendMetricsRatio;
  }

  /** Configuration options that apply to quota management. */
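The two new ratios are plain public fields on the metrics section of the config POJO, so they are picked up by the same YAML binding as the existing keys (`writeIntervalSeconds`, etc.). A minimal sketch of that style of binding, assuming SnakeYAML-like field mapping; the class shapes mirror the diff, but the loading code and values here are illustrative rather than the actual Nomulus config plumbing:

```java
import org.yaml.snakeyaml.Yaml;

public class ConfigBindingSketch {
  // Simplified stand-ins for ProxyConfig and its nested metrics section.
  public static class Config {
    public Metrics metrics;
  }

  public static class Metrics {
    public int writeIntervalSeconds;
    public double frontendMetricsRatio;
    public double backendMetricsRatio;
  }

  public static void main(String[] args) {
    String yaml =
        "metrics:\n"
            + "  writeIntervalSeconds: 60\n"
            + "  frontendMetricsRatio: 1.0\n"
            + "  backendMetricsRatio: 0.1\n";
    // SnakeYAML maps each YAML key onto the public field with the same name.
    Config config = new Yaml().loadAs(yaml, Config.class);
    System.out.println(config.metrics.backendMetricsRatio); // 0.1
  }
}
```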
@@ -61,6 +61,7 @@ import java.time.Duration;
import java.util.Arrays;
import java.util.Base64;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

@@ -395,6 +396,26 @@ public class ProxyModule {
    return Duration.ofSeconds(config.serverCertificateCacheSeconds);
  }

  @Singleton
  @Provides
  @Named("frontendMetricsRatio")
  static double provideFrontendMetricsRatio(ProxyConfig config) {
    return config.metrics.frontendMetricsRatio;
  }

  @Singleton
  @Provides
  @Named("backendMetricsRatio")
  static double provideBackendMetricsRatio(ProxyConfig config) {
    return config.metrics.backendMetricsRatio;
  }

  @Singleton
  @Provides
  static Random provideRandom() {
    return new Random();
  }

  /** Root level component that exposes the port-to-protocol map. */
  @Singleton
  @Component(
@@ -200,3 +200,15 @@ metrics:

  # How often metrics are written.
  writeIntervalSeconds: 60

  # What ratio of frontend request metrics should be stochastically recorded
  # (0.0 means none, 1.0 means all). This is useful for reducing metrics volume,
  # and thus cost, while still recording some information for performance
  # monitoring purposes.
  frontendMetricsRatio: 1.0

  # What ratio of backend request metrics should be stochastically recorded
  # (0.0 means none, 1.0 means all). This is useful for reducing metrics volume,
  # and thus cost, while still recording some information for performance
  # monitoring purposes.
  backendMetricsRatio: 1.0
@@ -22,7 +22,9 @@ import com.google.monitoring.metrics.MetricRegistryImpl;
import google.registry.util.NonFinalForTesting;
import io.netty.handler.codec.http.FullHttpResponse;
import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;
import java.util.Random;
import org.joda.time.Duration;

/** Backend metrics instrumentation. */

@@ -75,8 +77,14 @@ public class BackendMetrics extends BaseMetrics {
          LABELS,
          DEFAULT_LATENCY_FITTER);

  private final Random random;
  private final double backendMetricsRatio;

  @Inject
  BackendMetrics() {}
  BackendMetrics(@Named("backendMetricsRatio") double backendMetricsRatio, Random random) {
    this.backendMetricsRatio = backendMetricsRatio;
    this.random = random;
  }

  @Override
  void resetMetrics() {
@@ -89,15 +97,45 @@

  @NonFinalForTesting
  public void requestSent(String protocol, String certHash, int bytes) {
    requestsCounter.increment(protocol, certHash);
    // Short-circuit metrics recording randomly according to the configured ratio.
    if (random.nextDouble() > backendMetricsRatio) {
      return;
    }
    requestsCounter.incrementBy(roundRatioReciprocal(), protocol, certHash);
    requestBytes.record(bytes, protocol, certHash);
  }

  @NonFinalForTesting
  public void responseReceived(
      String protocol, String certHash, FullHttpResponse response, Duration latency) {
    // Short-circuit metrics recording randomly according to the configured ratio.
    if (random.nextDouble() > backendMetricsRatio) {
      return;
    }
    latencyMs.record(latency.getMillis(), protocol, certHash);
    responseBytes.record(response.content().readableBytes(), protocol, certHash);
    responsesCounter.increment(protocol, certHash, response.status().toString());
    responsesCounter.incrementBy(
        roundRatioReciprocal(), protocol, certHash, response.status().toString());
  }

  /**
   * Returns the reciprocal of the backend metrics ratio, stochastically rounded to the nearest int.
   *
   * <p>This is necessary because if we are only going to record a metric, say, 1/20th of the time,
   * then each time we do record it, we should increment it by 20 so that, modulo some randomness,
   * the total figures still add up to the same amount.
   *
   * <p>The stochastic rounding is necessary to prevent introducing errors stemming from rounding a
   * non-integer reciprocal consistently to the floor or ceiling. As an example, if the ratio is
   * .03, then the reciprocal would be 33.3..., so two-thirds of the time it should increment by 33
   * and one-third of the time it should increment by 34, calculated randomly, so that the overall
   * total adds up correctly.
   */
  private long roundRatioReciprocal() {
    double reciprocal = 1 / backendMetricsRatio;
    return (long)
        ((random.nextDouble() < reciprocal - Math.floor(reciprocal))
            ? Math.ceil(reciprocal)
            : Math.floor(reciprocal));
  }
}
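The gate-then-increment scheme above keeps the counter's expected value equal to the true event count: an event is recorded with probability equal to the ratio, and each recorded event adds roughly the reciprocal of that ratio. The following self-contained simulation mirrors the flow of `responseReceived` and `roundRatioReciprocal`; the ratio, seed, and iteration count are arbitrary choices for illustration, not production code.

```java
import java.util.Random;

public class SamplingCounterSketch {
  public static void main(String[] args) {
    double ratio = 0.3; // record roughly 30% of events
    Random random = new Random(42);
    long trueCount = 1_000_000;
    long counter = 0;
    for (long i = 0; i < trueCount; i++) {
      // Gate: skip recording for most events, as in responseReceived().
      if (random.nextDouble() > ratio) {
        continue;
      }
      // Increment by the stochastically rounded reciprocal, as in roundRatioReciprocal():
      // 1 / 0.3 = 3.33..., so increment by 4 about a third of the time and by 3 otherwise.
      double reciprocal = 1 / ratio;
      long increment =
          (long)
              ((random.nextDouble() < reciprocal - Math.floor(reciprocal))
                  ? Math.ceil(reciprocal)
                  : Math.floor(reciprocal));
      counter += increment;
    }
    // The sampled counter lands close to the true count, within random noise.
    System.out.println("true=" + trueCount + " sampled=" + counter);
  }
}
```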
@@ -26,8 +26,10 @@ import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.util.concurrent.GlobalEventExecutor;
import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.joda.time.Duration;

@@ -78,8 +80,14 @@ public class FrontendMetrics extends BaseMetrics {
          LABELS,
          DEFAULT_LATENCY_FITTER);

  private final Random random;
  private final double frontendMetricsRatio;

  @Inject
  public FrontendMetrics() {}
  FrontendMetrics(@Named("frontendMetricsRatio") double frontendMetricsRatio, Random random) {
    this.frontendMetricsRatio = frontendMetricsRatio;
    this.random = random;
  }

  @Override
  void resetMetrics() {
@@ -109,6 +117,10 @@

  @NonFinalForTesting
  public void responseSent(String protocol, String certHash, Duration latency) {
    // Short-circuit metrics recording randomly according to the configured ratio.
    if (random.nextDouble() > frontendMetricsRatio) {
      return;
    }
    latencyMs.record(latency.getMillis(), protocol, certHash);
  }
}
@@ -56,6 +56,7 @@ import jakarta.inject.Named;
import jakarta.inject.Provider;
import jakarta.inject.Singleton;
import java.time.Duration;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

@@ -312,6 +313,26 @@ public abstract class ProtocolModuleTest {
    return Duration.ofHours(1);
  }

  @Singleton
  @Provides
  @Named("frontendMetricsRatio")
  static double provideFrontendMetricsRatio() {
    return 1.0;
  }

  @Singleton
  @Provides
  @Named("backendMetricsRatio")
  static double providebackendMetricsRatio() {
    return 1.0;
  }

  @Singleton
  @Provides
  static Random provideRandom() {
    return new Random();
  }

  // This method is only here to satisfy Dagger binding, but is never used. In test environment,
  // it is the self-signed certificate and its key that ends up being used.
  @Singleton
@@ -18,11 +18,14 @@ import static com.google.monitoring.metrics.contrib.DistributionMetricSubject.assertThat;
import static com.google.monitoring.metrics.contrib.LongMetricSubject.assertThat;
import static google.registry.proxy.TestUtils.makeHttpPostRequest;
import static google.registry.proxy.TestUtils.makeHttpResponse;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import com.google.common.collect.ImmutableSet;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import java.util.Random;
import org.joda.time.Duration;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

@@ -34,10 +37,11 @@ class BackendMetricsTest {
  private final String certHash = "blah12345";
  private final String protocol = "frontend protocol";

  private final BackendMetrics metrics = new BackendMetrics();
  private BackendMetrics metrics;

  @BeforeEach
  void beforeEach() {
    metrics = new BackendMetrics(1.0, new Random());
    metrics.resetMetrics();
  }
@@ -107,22 +111,37 @@

  @Test
  void testSuccess_multipleResponses() {
    Random mockRandom = mock(Random.class);
    metrics = new BackendMetrics(0.3, mockRandom);
    // The reciprocal of this metrics ratio is 3.3..., so depending on stochastic rounding each
    // response will be recorded as either 3 or 4 (which we hard-code in test by mocking the RNG).
    when(mockRandom.nextDouble())
        .thenReturn(
            /* record response1 */ .1,
            /* ... as 3 */ .5,
            /* record response2 */ .04,
            /* ... as 4 */ .2,
            /* don't record response3 (skips stochastic rounding) */ .5,
            /* record response4 */ .15,
            /* ... as 3 */ .5);
    String content1 = "some response";
    String content2 = "other response";
    String content3 = "a very bad response";
    FullHttpResponse response1 = makeHttpResponse(content1, HttpResponseStatus.OK);
    FullHttpResponse response2 = makeHttpResponse(content2, HttpResponseStatus.OK);
    FullHttpResponse response3 = makeHttpResponse(content3, HttpResponseStatus.BAD_REQUEST);
    FullHttpResponse response3 = makeHttpResponse(content2, HttpResponseStatus.OK);
    FullHttpResponse response4 = makeHttpResponse(content3, HttpResponseStatus.BAD_REQUEST);
    metrics.responseReceived(protocol, certHash, response1, Duration.millis(5));
    metrics.responseReceived(protocol, certHash, response2, Duration.millis(8));
    metrics.responseReceived(protocol, certHash, response3, Duration.millis(2));
    metrics.responseReceived(protocol, certHash, response3, Duration.millis(15));
    metrics.responseReceived(protocol, certHash, response4, Duration.millis(2));

    assertThat(BackendMetrics.requestsCounter).hasNoOtherValues();
    assertThat(BackendMetrics.requestBytes).hasNoOtherValues();
    assertThat(BackendMetrics.responsesCounter)
        .hasValueForLabels(2, protocol, certHash, "200 OK")
        .hasValueForLabels(7, protocol, certHash, "200 OK")
        .and()
        .hasValueForLabels(1, protocol, certHash, "400 Bad Request")
        .hasValueForLabels(3, protocol, certHash, "400 Bad Request")
        .and()
        .hasNoOtherValues();
    assertThat(BackendMetrics.responseBytes)
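The asserted counter values follow directly from the mocked sequence: each recorded response consumes one `nextDouble` for the sampling gate and one for the stochastic rounding, and with a ratio of 0.3 the reciprocal is 3.33..., so a rounding draw below .33 yields 4 and anything higher yields 3. A short sketch reproducing that arithmetic; the draws are copied from the test and the rounding logic is mirrored from `BackendMetrics`, but the class itself is just an illustration:

```java
public class MockedSequenceArithmetic {
  public static void main(String[] args) {
    double ratio = 0.3;
    double reciprocal = 1 / ratio; // 3.33...
    // (gate, rounding) draws for response1, response2, response4; response3's gate draw (.5) fails.
    double[][] recorded = {{.1, .5}, {.04, .2}, {.15, .5}};
    long okTotal = 0;
    long badTotal = 0;
    for (int i = 0; i < recorded.length; i++) {
      double rounding = recorded[i][1];
      long increment =
          (long)
              (rounding < reciprocal - Math.floor(reciprocal)
                  ? Math.ceil(reciprocal)
                  : Math.floor(reciprocal));
      // response1 and response2 are 200 OK; response4 is 400 Bad Request.
      if (i < 2) {
        okTotal += increment;
      } else {
        badTotal += increment;
      }
    }
    System.out.println("200 OK total = " + okTotal);           // 3 + 4 = 7
    System.out.println("400 Bad Request total = " + badTotal); // 3
  }
}
```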
@@ -15,11 +15,17 @@
package google.registry.proxy.metric;

import static com.google.common.truth.Truth.assertThat;
import static com.google.monitoring.metrics.contrib.DistributionMetricSubject.assertThat;
import static com.google.monitoring.metrics.contrib.LongMetricSubject.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import com.google.common.collect.ImmutableSet;
import io.netty.channel.ChannelFuture;
import io.netty.channel.DefaultChannelId;
import io.netty.channel.embedded.EmbeddedChannel;
import java.util.Random;
import org.joda.time.Duration;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

@@ -28,10 +34,11 @@ class FrontendMetricsTest {

  private static final String PROTOCOL = "some protocol";
  private static final String CERT_HASH = "abc_blah_1134zdf";
  private final FrontendMetrics metrics = new FrontendMetrics();
  private FrontendMetrics metrics;

  @BeforeEach
  void beforeEach() {
    metrics = new FrontendMetrics(1.0, new Random());
    metrics.resetMetrics();
  }
@@ -60,8 +67,13 @@

  @Test
  void testSuccess_twoConnections_sameClient() {
    Random mockRandom = mock(Random.class);
    metrics = new FrontendMetrics(0.2, mockRandom);
    // The third response won't be logged.
    when(mockRandom.nextDouble()).thenReturn(.1, .04, .5);
    EmbeddedChannel channel1 = new EmbeddedChannel();
    EmbeddedChannel channel2 = new EmbeddedChannel(DefaultChannelId.newInstance());
    EmbeddedChannel channel3 = new EmbeddedChannel();

    metrics.registerActiveConnection(PROTOCOL, CERT_HASH, channel1);
    assertThat(channel1.isActive()).isTrue();

@@ -85,6 +97,27 @@
        .and()
        .hasNoOtherValues();

    metrics.responseSent(PROTOCOL, CERT_HASH, Duration.millis(10));
    metrics.responseSent(PROTOCOL, CERT_HASH, Duration.millis(8));
    metrics.responseSent(PROTOCOL, CERT_HASH, Duration.millis(13));

    metrics.registerActiveConnection(PROTOCOL, CERT_HASH, channel3);
    assertThat(channel3.isActive()).isTrue();
    assertThat(FrontendMetrics.activeConnectionsGauge)
        .hasValueForLabels(2, PROTOCOL, CERT_HASH)
        .and()
        .hasNoOtherValues();
    // All connection counts are recorded as metrics, but ...
    assertThat(FrontendMetrics.totalConnectionsCounter)
        .hasValueForLabels(3, PROTOCOL, CERT_HASH)
        .and()
        .hasNoOtherValues();
    // Latency stats are subject to the metrics ratio.
    assertThat(FrontendMetrics.latencyMs)
        .hasDataSetForLabels(ImmutableSet.of(10, 8), PROTOCOL, CERT_HASH)
        .and()
        .hasNoOtherValues();

    @SuppressWarnings("unused")
    ChannelFuture unusedFuture1 = channel1.close();
    assertThat(channel1.isActive()).isFalse();
@@ -93,7 +126,7 @@
        .and()
        .hasNoOtherValues();
    assertThat(FrontendMetrics.totalConnectionsCounter)
        .hasValueForLabels(2, PROTOCOL, CERT_HASH)
        .hasValueForLabels(3, PROTOCOL, CERT_HASH)
        .and()
        .hasNoOtherValues();

@@ -102,7 +135,16 @@
    assertThat(channel2.isActive()).isFalse();
    assertThat(FrontendMetrics.activeConnectionsGauge).hasNoOtherValues();
    assertThat(FrontendMetrics.totalConnectionsCounter)
        .hasValueForLabels(2, PROTOCOL, CERT_HASH)
        .hasValueForLabels(3, PROTOCOL, CERT_HASH)
        .and()
        .hasNoOtherValues();

    @SuppressWarnings("unused")
    ChannelFuture unusedFuture3 = channel3.close();
    assertThat(channel3.isActive()).isFalse();
    assertThat(FrontendMetrics.activeConnectionsGauge).hasNoOtherValues();
    assertThat(FrontendMetrics.totalConnectionsCounter)
        .hasValueForLabels(3, PROTOCOL, CERT_HASH)
        .and()
        .hasNoOtherValues();
  }
@@ -196,6 +196,7 @@ artifacts:
      - 'release/cloudbuild-sync-and-tag.yaml'
      - 'release/cloudbuild-deploy-*.yaml'
      - 'release/cloudbuild-delete-*.yaml'
      - 'release/cloudbuild-renew-prober-certs-*.yaml'
      - 'release/cloudbuild-schema-deploy-*.yaml'
      - 'release/cloudbuild-schema-verify-*.yaml'
      - 'release/cloudbuild-restart-proxies-*.yaml'

@@ -166,6 +166,8 @@ steps:
          > release/cloudbuild-schema-deploy-${environment}.yaml
        sed s/'$${_ENV}'/${environment}/g release/cloudbuild-schema-verify.yaml \
          > release/cloudbuild-schema-verify-${environment}.yaml
        sed s/'$${_ENV}'/${environment}/g release/cloudbuild-renew-prober-certs.yaml \
          > release/cloudbuild-renew-prober-certs-${environment}.yaml
      done
  # Do text replacement in the k8s manifests.
  - name: 'gcr.io/cloud-builders/gcloud'