mirror of https://github.com/google/nomulus
synced 2026-01-28 00:22:20 +00:00

Compare commits
13 Commits
nomulus-20 ... nomulus-20
| Author | SHA1 | Date |
|---|---|---|
| | eded6813ab | |
| | bbe5c058fe | |
| | 4b0cf576f8 | |
| | 045de3889b | |
| | 68fc4cd022 | |
| | ebe55146c3 | |
| | 807ddf46b9 | |
| | ff8f86090d | |
| | 5822f53e14 | |
| | d04b3299aa | |
| | ceade7f954 | |
| | 1fcf63facd | |
| | f87e7eb6e6 | |
```diff
@@ -676,9 +676,9 @@ Optional<List<String>> getToolArgsList() {
 // To run the nomulus tools with these command line tokens:
 // "--foo", "bar baz", "--qux=quz"
-// gradle registryTool --args="--foo 'bar baz' --qux=quz"
+// gradle core:registryTool --args="--foo 'bar baz' --qux=quz"
 // or:
-// gradle registryTool --PtoolArgs="--foo|bar baz|--qux=quz"
+// gradle core:registryTool -PtoolArgs="--foo|bar baz|--qux=quz"
 // Note that the delimiting pipe can be backslash escaped if it is part of a
 // parameter.
 ext.createToolTask = {
```
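The comment above documents the pipe-delimited `-PtoolArgs` form, where a literal pipe inside a parameter is backslash-escaped. A minimal Java sketch of that splitting rule, assuming the documented behavior (the regex and helper here are illustrative, not the build script's actual implementation):

```java
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

class ToolArgsSplitter {
  /** Splits on pipes that are not backslash-escaped, then unescapes "\|". */
  static List<String> split(String toolArgs) {
    return Arrays.stream(toolArgs.split("(?<!\\\\)\\|"))
        .map(token -> token.replace("\\|", "|"))
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    System.out.println(split("--foo|bar baz|--qux=quz")); // [--foo, bar baz, --qux=quz]
    System.out.println(split("--sep|a\\|b"));             // [--sep, a|b]
  }
}
```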
```diff
@@ -17,7 +17,7 @@ package google.registry.backup;
 import static google.registry.backup.ExportCommitLogDiffAction.LOWER_CHECKPOINT_TIME_PARAM;
 import static google.registry.backup.ExportCommitLogDiffAction.UPPER_CHECKPOINT_TIME_PARAM;
 import static google.registry.model.ofy.ObjectifyService.auditedOfy;
-import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
+import static google.registry.persistence.transaction.TransactionManagerFactory.ofyTm;
 import static google.registry.util.DateTimeUtils.isBeforeOrAt;
 
 import com.google.common.collect.ImmutableMultimap;
@@ -67,7 +67,8 @@ public final class CommitLogCheckpointAction implements Runnable {
     final CommitLogCheckpoint checkpoint = strategy.computeCheckpoint();
     logger.atInfo().log(
         "Generated candidate checkpoint for time: %s", checkpoint.getCheckpointTime());
-    tm().transact(
+    ofyTm()
+        .transact(
             () -> {
               DateTime lastWrittenTime = CommitLogCheckpointRoot.loadRoot().getLastWrittenTime();
               if (isBeforeOrAt(checkpoint.getCheckpointTime(), lastWrittenTime)) {
```
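The change above swaps `tm()` for `ofyTm()`. Assuming the usual Nomulus factory semantics — `tm()` resolves to whichever transaction manager the migration schedule currently designates, while `ofyTm()` always returns the Datastore-backed one — the write is now pinned to Datastore, where commit-log entities live. A sketch of the resulting call shape:

```java
// Sketch: the checkpoint write must not follow tm()'s migration-dependent
// routing, because commit-log entities exist only in Datastore.
ofyTm()
    .transact(
        () -> {
          DateTime lastWrittenTime = CommitLogCheckpointRoot.loadRoot().getLastWrittenTime();
          if (isBeforeOrAt(checkpoint.getCheckpointTime(), lastWrittenTime)) {
            return; // the candidate is not newer than what was already written
          }
          // ... save the new checkpoint and enqueue the export task
        });
```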
```diff
@@ -18,7 +18,7 @@ import static com.google.common.base.Preconditions.checkNotNull;
 import static com.google.common.base.Preconditions.checkState;
 import static google.registry.mapreduce.MapreduceRunner.PARAM_DRY_RUN;
 import static google.registry.model.ofy.ObjectifyService.auditedOfy;
-import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
+import static google.registry.persistence.transaction.TransactionManagerFactory.ofyTm;
 import static java.lang.Boolean.FALSE;
 import static java.lang.Boolean.TRUE;
 
@@ -288,7 +288,8 @@ public final class DeleteOldCommitLogsAction implements Runnable {
       }
 
       DeletionResult deletionResult =
-          tm().transactNew(
+          ofyTm()
+              .transactNew(
                   () -> {
                     CommitLogManifest manifest = auditedOfy().load().key(manifestKey).now();
                     // It is possible that the same manifestKey was run twice, if a shard had to be
```
```diff
@@ -110,7 +110,7 @@ public final class AsyncTaskEnqueuer {
             .method(Method.POST)
             .header("Host", backendHostname)
             .countdownMillis(etaDuration.getMillis())
-            .param(PARAM_RESOURCE_KEY, entityKey.getOfyKey().getString())
+            .param(PARAM_RESOURCE_KEY, entityKey.stringify())
             .param(PARAM_REQUESTED_TIME, now.toString());
     if (whenToResave.size() > 1) {
       task.param(PARAM_RESAVE_TIMES, Joiner.on(',').join(whenToResave.tailSet(firstResave, false)));
@@ -131,7 +131,7 @@ public final class AsyncTaskEnqueuer {
     TaskOptions task =
         TaskOptions.Builder.withMethod(Method.PULL)
             .countdownMillis(asyncDeleteDelay.getMillis())
-            .param(PARAM_RESOURCE_KEY, resourceToDelete.createVKey().getOfyKey().getString())
+            .param(PARAM_RESOURCE_KEY, resourceToDelete.createVKey().stringify())
             .param(PARAM_REQUESTING_CLIENT_ID, requestingRegistrarId)
             .param(PARAM_SERVER_TRANSACTION_ID, trid.getServerTransactionId())
             .param(PARAM_IS_SUPERUSER, Boolean.toString(isSuperuser))
@@ -148,7 +148,7 @@ public final class AsyncTaskEnqueuer {
     addTaskToQueueWithRetry(
         asyncDnsRefreshPullQueue,
         TaskOptions.Builder.withMethod(Method.PULL)
            .param(PARAM_HOST_KEY, hostKey.getOfyKey().getString())
+            .param(PARAM_HOST_KEY, hostKey.stringify())
             .param(PARAM_REQUESTED_TIME, now.toString()));
   }
```
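All three enqueue sites replace the websafe Objectify key string with the `VKey` string form. A minimal sketch of the before/after values — both methods appear in this diff; the `host` entity is illustrative and the block is not self-contained:

```java
// Sketch: the old parameter serialized the raw Objectify key, while the new
// one serializes the VKey wrapper, whose string form survives the SQL
// migration. `host` is an illustrative EPP resource.
VKey<HostResource> hostKey = host.createVKey();
String oldValue = hostKey.getOfyKey().getString(); // websafe Datastore key string
String newValue = hostKey.stringify();             // VKey string form used going forward
```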
```diff
@@ -23,6 +23,7 @@ import google.registry.config.RegistryConfig.ConfigModule;
 import google.registry.persistence.PersistenceModule;
 import google.registry.persistence.PersistenceModule.BeamBulkQueryJpaTm;
 import google.registry.persistence.PersistenceModule.BeamJpaTm;
+import google.registry.persistence.PersistenceModule.BeamReadOnlyReplicaJpaTm;
 import google.registry.persistence.PersistenceModule.TransactionIsolationLevel;
 import google.registry.persistence.transaction.JpaTransactionManager;
 import google.registry.privileges.secretmanager.SecretManagerModule;
@@ -59,6 +60,13 @@ public interface RegistryPipelineComponent {
   @BeamBulkQueryJpaTm
   Lazy<JpaTransactionManager> getBulkQueryJpaTransactionManager();
 
+  /**
+   * A {@link JpaTransactionManager} that uses the Postgres read-only replica if configured (uses
+   * the standard DB otherwise).
+   */
+  @BeamReadOnlyReplicaJpaTm
+  Lazy<JpaTransactionManager> getReadOnlyReplicaJpaTransactionManager();
+
   @Component.Builder
   interface Builder {
```
```diff
@@ -56,6 +56,10 @@ public class RegistryPipelineWorkerInitializer implements JvmInitializer {
       case BULK_QUERY:
         transactionManagerLazy = registryPipelineComponent.getBulkQueryJpaTransactionManager();
         break;
+      case READ_ONLY_REPLICA:
+        transactionManagerLazy =
+            registryPipelineComponent.getReadOnlyReplicaJpaTransactionManager();
+        break;
       case REGULAR:
       default:
         transactionManagerLazy = registryPipelineComponent.getJpaTransactionManager();
```
```diff
@@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableSet;
 import com.googlecode.objectify.Key;
 import google.registry.backup.VersionedEntity;
 import google.registry.beam.initsql.Transforms;
+import google.registry.model.EppResource;
 import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.billing.BillingEvent;
 import google.registry.model.common.Cursor;
@@ -42,6 +43,7 @@ import google.registry.model.tld.Registry;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import javax.annotation.Nullable;
 import org.apache.beam.sdk.Pipeline;
 import org.apache.beam.sdk.transforms.DoFn;
 import org.apache.beam.sdk.transforms.ParDo;
@@ -93,7 +95,8 @@ public final class DatastoreSnapshots {
       String commitLogDir,
       DateTime commitLogFromTime,
       DateTime commitLogToTime,
-      Set<Class<?>> kinds) {
+      Set<Class<?>> kinds,
+      Optional<DateTime> compareStartTime) {
     PCollectionTuple snapshot =
         pipeline.apply(
             "Load Datastore snapshot.",
@@ -112,11 +115,11 @@ public final class DatastoreSnapshots {
         perTypeSnapshots =
             perTypeSnapshots.and(
                 createSqlEntityTupleTag((Class<? extends SqlEntity>) kind),
-                datastoreEntityToPojo(perKindSnapshot, kind.getSimpleName()));
+                datastoreEntityToPojo(perKindSnapshot, kind.getSimpleName(), compareStartTime));
         continue;
       }
       Verify.verify(kind == HistoryEntry.class, "Unexpected Non-SqlEntity class: %s", kind);
-      PCollectionTuple historyEntriesByType = splitHistoryEntry(perKindSnapshot);
+      PCollectionTuple historyEntriesByType = splitHistoryEntry(perKindSnapshot, compareStartTime);
       for (Map.Entry<TupleTag<?>, PCollection<?>> entry :
           historyEntriesByType.getAll().entrySet()) {
         perTypeSnapshots = perTypeSnapshots.and(entry.getKey().getId(), entry.getValue());
@@ -129,7 +132,9 @@ public final class DatastoreSnapshots {
    * Splits a {@link PCollection} of {@link HistoryEntry HistoryEntries} into three collections of
    * its child entities by type.
    */
-  static PCollectionTuple splitHistoryEntry(PCollection<VersionedEntity> historyEntries) {
+  static PCollectionTuple splitHistoryEntry(
+      PCollection<VersionedEntity> historyEntries, Optional<DateTime> compareStartTime) {
+    DateTime nullableStartTime = compareStartTime.orElse(null);
     return historyEntries.apply(
         "Split HistoryEntry by Resource Type",
         ParDo.of(
@@ -138,6 +143,7 @@ public final class DatastoreSnapshots {
               public void processElement(
                   @Element VersionedEntity historyEntry, MultiOutputReceiver out) {
                 Optional.ofNullable(Transforms.convertVersionedEntityToSqlEntity(historyEntry))
+                    .filter(e -> isEntityIncludedForComparison(e, nullableStartTime))
                     .ifPresent(
                         sqlEntity ->
                             out.get(createSqlEntityTupleTag(sqlEntity.getClass()))
@@ -155,7 +161,8 @@ public final class DatastoreSnapshots {
    * objects.
    */
   static PCollection<SqlEntity> datastoreEntityToPojo(
-      PCollection<VersionedEntity> entities, String desc) {
+      PCollection<VersionedEntity> entities, String desc, Optional<DateTime> compareStartTime) {
+    DateTime nullableStartTime = compareStartTime.orElse(null);
     return entities.apply(
         "Datastore Entity to Pojo " + desc,
         ParDo.of(
@@ -164,8 +171,23 @@ public final class DatastoreSnapshots {
               public void processElement(
                   @Element VersionedEntity entity, OutputReceiver<SqlEntity> out) {
                 Optional.ofNullable(Transforms.convertVersionedEntityToSqlEntity(entity))
+                    .filter(e -> isEntityIncludedForComparison(e, nullableStartTime))
                     .ifPresent(out::output);
               }
             }));
   }
 
+  static boolean isEntityIncludedForComparison(
+      SqlEntity entity, @Nullable DateTime compareStartTime) {
+    if (compareStartTime == null) {
+      return true;
+    }
+    if (entity instanceof HistoryEntry) {
+      return compareStartTime.isBefore(((HistoryEntry) entity).getModificationTime());
+    }
+    if (entity instanceof EppResource) {
+      return compareStartTime.isBefore(((EppResource) entity).getUpdateTimestamp().getTimestamp());
+    }
+    return true;
+  }
 }
```
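Note that `isEntityIncludedForComparison` uses a strict inequality, so an entity modified exactly at `compareStartTime` is excluded, and entities that are neither `HistoryEntry` nor `EppResource` always pass. A runnable illustration of the boundary, using the same joda-time predicate:

```java
import org.joda.time.DateTime;

class CompareStartTimeBoundary {
  public static void main(String[] args) {
    DateTime start = DateTime.parse("2021-12-01T00:00:00Z");
    // Mirrors compareStartTime.isBefore(modificationTime) from the diff:
    System.out.println(start.isBefore(start.plusMillis(1))); // true  -> included
    System.out.println(start.isBefore(start));               // false -> excluded
  }
}
```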
```diff
@@ -14,15 +14,22 @@
 
 package google.registry.beam.comparedb;
 
+import static com.google.common.base.Preconditions.checkState;
 import static google.registry.beam.comparedb.ValidateSqlUtils.createSqlEntityTupleTag;
 import static google.registry.beam.comparedb.ValidateSqlUtils.getMedianIdForHistoryTable;
 
+import com.google.auto.value.AutoValue;
+import com.google.common.base.Strings;
 import com.google.common.base.Verify;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.ImmutableSetMultimap;
 import com.google.common.collect.Streams;
 import google.registry.beam.common.RegistryJpaIO;
+import google.registry.beam.common.RegistryJpaIO.Read;
+import google.registry.model.EppResource;
+import google.registry.model.UpdateAutoTimestamp;
 import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.billing.BillingEvent;
 import google.registry.model.bulkquery.BulkQueryEntities;
@@ -50,8 +57,10 @@ import google.registry.model.replay.SqlEntity;
 import google.registry.model.reporting.DomainTransactionRecord;
 import google.registry.model.tld.Registry;
 import google.registry.persistence.transaction.CriteriaQueryBuilder;
+import google.registry.util.DateTimeUtils;
 import java.io.Serializable;
 import java.util.Optional;
+import javax.persistence.Entity;
 import org.apache.beam.sdk.Pipeline;
 import org.apache.beam.sdk.transforms.DoFn;
 import org.apache.beam.sdk.transforms.Flatten;
@@ -65,6 +74,7 @@ import org.apache.beam.sdk.values.PCollectionList;
 import org.apache.beam.sdk.values.PCollectionTuple;
 import org.apache.beam.sdk.values.TypeDescriptor;
 import org.apache.beam.sdk.values.TypeDescriptors;
+import org.joda.time.DateTime;
 
 /**
  * Utilities for loading SQL snapshots.
```
```diff
@@ -113,28 +123,48 @@ public final class SqlSnapshots {
   public static PCollectionTuple loadCloudSqlSnapshotByType(
       Pipeline pipeline,
       ImmutableSet<Class<? extends SqlEntity>> sqlEntityTypes,
-      Optional<String> snapshotId) {
+      Optional<String> snapshotId,
+      Optional<DateTime> compareStartTime) {
     PCollectionTuple perTypeSnapshots = PCollectionTuple.empty(pipeline);
     for (Class<? extends SqlEntity> clazz : sqlEntityTypes) {
       if (clazz == DomainBase.class) {
         perTypeSnapshots =
             perTypeSnapshots.and(
                 createSqlEntityTupleTag(DomainBase.class),
-                loadAndAssembleDomainBase(pipeline, snapshotId));
+                loadAndAssembleDomainBase(pipeline, snapshotId, compareStartTime));
         continue;
       }
       if (clazz == DomainHistory.class) {
         perTypeSnapshots =
             perTypeSnapshots.and(
                 createSqlEntityTupleTag(DomainHistory.class),
-                loadAndAssembleDomainHistory(pipeline, snapshotId));
+                loadAndAssembleDomainHistory(pipeline, snapshotId, compareStartTime));
         continue;
       }
       if (clazz == ContactHistory.class) {
         perTypeSnapshots =
             perTypeSnapshots.and(
                 createSqlEntityTupleTag(ContactHistory.class),
-                loadContactHistory(pipeline, snapshotId));
+                loadContactHistory(pipeline, snapshotId, compareStartTime));
         continue;
       }
+      if (clazz == HostHistory.class) {
+        perTypeSnapshots =
+            perTypeSnapshots.and(
+                createSqlEntityTupleTag(HostHistory.class),
+                loadHostHistory(
+                    pipeline, snapshotId, compareStartTime.orElse(DateTimeUtils.START_OF_TIME)));
+        continue;
+      }
+      if (EppResource.class.isAssignableFrom(clazz) && compareStartTime.isPresent()) {
+        perTypeSnapshots =
+            perTypeSnapshots.and(
+                createSqlEntityTupleTag(clazz),
+                pipeline.apply(
+                    "SQL Load " + clazz.getSimpleName(),
+                    buildEppResourceQueryWithTimeFilter(
+                            clazz, SqlEntity.class, snapshotId, compareStartTime.get())
+                        .withSnapshot(snapshotId.orElse(null))));
+        continue;
+      }
       perTypeSnapshots =
```
```diff
@@ -155,20 +185,33 @@ public final class SqlSnapshots {
    * @see BulkQueryEntities
    */
   public static PCollection<SqlEntity> loadAndAssembleDomainBase(
-      Pipeline pipeline, Optional<String> snapshotId) {
+      Pipeline pipeline, Optional<String> snapshotId, Optional<DateTime> compareStartTime) {
     PCollection<KV<String, Serializable>> baseObjects =
-        readAllAndAssignKey(pipeline, DomainBaseLite.class, DomainBaseLite::getRepoId, snapshotId);
+        readAllAndAssignKey(
+            pipeline,
+            DomainBaseLite.class,
+            DomainBaseLite::getRepoId,
+            snapshotId,
+            compareStartTime);
     PCollection<KV<String, Serializable>> gracePeriods =
-        readAllAndAssignKey(pipeline, GracePeriod.class, GracePeriod::getDomainRepoId, snapshotId);
+        readAllAndAssignKey(
+            pipeline,
+            GracePeriod.class,
+            GracePeriod::getDomainRepoId,
+            snapshotId,
+            compareStartTime);
     PCollection<KV<String, Serializable>> delegationSigners =
         readAllAndAssignKey(
             pipeline,
             DelegationSignerData.class,
             DelegationSignerData::getDomainRepoId,
-            snapshotId);
+            snapshotId,
+            compareStartTime);
     PCollection<KV<String, Serializable>> domainHosts =
-        readAllAndAssignKey(pipeline, DomainHost.class, DomainHost::getDomainRepoId, snapshotId);
+        readAllAndAssignKey(
+            pipeline, DomainHost.class, DomainHost::getDomainRepoId, snapshotId, compareStartTime);
 
+    DateTime nullableCompareStartTime = compareStartTime.orElse(null);
     return PCollectionList.of(
             ImmutableList.of(baseObjects, gracePeriods, delegationSigners, domainHosts))
         .apply("SQL Merge DomainBase parts", Flatten.pCollections())
@@ -184,6 +227,14 @@ public final class SqlSnapshots {
                   TypedClassifier partsByType = new TypedClassifier(kv.getValue());
                   ImmutableSet<DomainBaseLite> baseObjects =
                       partsByType.getAllOf(DomainBaseLite.class);
+                  if (nullableCompareStartTime != null) {
+                    Verify.verify(
+                        baseObjects.size() <= 1,
+                        "Found duplicate DomainBaseLite object per repoId: " + kv.getKey());
+                    if (baseObjects.isEmpty()) {
+                      return;
+                    }
+                  }
                   Verify.verify(
                       baseObjects.size() == 1,
                       "Expecting one DomainBaseLite object per repoId: " + kv.getKey());
```
```diff
@@ -205,16 +256,16 @@ public final class SqlSnapshots {
    * <p>This method uses two queries to load data in parallel. This is a performance optimization
    * specifically for the production database.
    */
-  static PCollection<SqlEntity> loadContactHistory(Pipeline pipeline, Optional<String> snapshotId) {
-    long medianId =
-        getMedianIdForHistoryTable("ContactHistory")
-            .orElseThrow(
-                () -> new IllegalStateException("Not a valid database: no ContactHistory."));
+  static PCollection<SqlEntity> loadContactHistory(
+      Pipeline pipeline, Optional<String> snapshotId, Optional<DateTime> compareStartTime) {
+    PartitionedQuery partitionedQuery =
+        buildPartitonedHistoryQuery(ContactHistory.class, compareStartTime);
     PCollection<SqlEntity> part1 =
         pipeline.apply(
             "SQL Load ContactHistory first half",
             RegistryJpaIO.read(
-                    String.format("select c from ContactHistory c where id <= %s", medianId),
+                    partitionedQuery.firstHalfQuery(),
+                    partitionedQuery.parameters(),
                     false,
                     SqlEntity.class::cast)
                 .withSnapshot(snapshotId.orElse(null)));
@@ -222,7 +273,8 @@ public final class SqlSnapshots {
         pipeline.apply(
             "SQL Load ContactHistory second half",
             RegistryJpaIO.read(
-                    String.format("select c from ContactHistory c where id > %s", medianId),
+                    partitionedQuery.secondHalfQuery(),
+                    partitionedQuery.parameters(),
                     false,
                     SqlEntity.class::cast)
                 .withSnapshot(snapshotId.orElse(null)));
@@ -231,6 +283,19 @@ public final class SqlSnapshots {
         .apply("Combine ContactHistory parts", Flatten.pCollections());
   }
 
+  /** Loads all {@link HostHistory} entities from the database. */
+  static PCollection<SqlEntity> loadHostHistory(
+      Pipeline pipeline, Optional<String> snapshotId, DateTime compareStartTime) {
+    return pipeline.apply(
+        "SQL Load HostHistory",
+        RegistryJpaIO.read(
+                "select c from HostHistory c where :compareStartTime < modificationTime",
+                ImmutableMap.of("compareStartTime", compareStartTime),
+                false,
+                SqlEntity.class::cast)
+            .withSnapshot(snapshotId.orElse(null)));
+  }
+
   /**
    * Bulk-loads all parts of {@link DomainHistory} and assembles them in the pipeline.
    *
```
```diff
@@ -240,16 +305,15 @@ public final class SqlSnapshots {
    * @see BulkQueryEntities
    */
   static PCollection<SqlEntity> loadAndAssembleDomainHistory(
-      Pipeline pipeline, Optional<String> snapshotId) {
-    long medianId =
-        getMedianIdForHistoryTable("DomainHistory")
-            .orElseThrow(
-                () -> new IllegalStateException("Not a valid database: no DomainHistory."));
+      Pipeline pipeline, Optional<String> snapshotId, Optional<DateTime> compareStartTime) {
+    PartitionedQuery partitionedQuery =
+        buildPartitonedHistoryQuery(DomainHistoryLite.class, compareStartTime);
     PCollection<KV<String, Serializable>> baseObjectsPart1 =
         queryAndAssignKey(
             pipeline,
             "first half",
-            String.format("select c from DomainHistory c where id <= %s", medianId),
+            partitionedQuery.firstHalfQuery(),
+            partitionedQuery.parameters(),
             DomainHistoryLite.class,
             compose(DomainHistoryLite::getDomainHistoryId, DomainHistoryId::toString),
             snapshotId);
@@ -257,7 +321,8 @@ public final class SqlSnapshots {
         queryAndAssignKey(
             pipeline,
             "second half",
-            String.format("select c from DomainHistory c where id > %s", medianId),
+            partitionedQuery.secondHalfQuery(),
+            partitionedQuery.parameters(),
             DomainHistoryLite.class,
             compose(DomainHistoryLite::getDomainHistoryId, DomainHistoryId::toString),
             snapshotId);
@@ -266,26 +331,31 @@ public final class SqlSnapshots {
             pipeline,
             GracePeriodHistory.class,
             compose(GracePeriodHistory::getDomainHistoryId, DomainHistoryId::toString),
-            snapshotId);
+            snapshotId,
+            compareStartTime);
     PCollection<KV<String, Serializable>> delegationSigners =
         readAllAndAssignKey(
             pipeline,
             DomainDsDataHistory.class,
             compose(DomainDsDataHistory::getDomainHistoryId, DomainHistoryId::toString),
-            snapshotId);
+            snapshotId,
+            compareStartTime);
     PCollection<KV<String, Serializable>> domainHosts =
         readAllAndAssignKey(
             pipeline,
             DomainHistoryHost.class,
             compose(DomainHistoryHost::getDomainHistoryId, DomainHistoryId::toString),
-            snapshotId);
+            snapshotId,
+            compareStartTime);
     PCollection<KV<String, Serializable>> transactionRecords =
         readAllAndAssignKey(
             pipeline,
             DomainTransactionRecord.class,
             compose(DomainTransactionRecord::getDomainHistoryId, DomainHistoryId::toString),
-            snapshotId);
+            snapshotId,
+            compareStartTime);
 
+    DateTime nullableCompareStartTime = compareStartTime.orElse(null);
     return PCollectionList.of(
             ImmutableList.of(
                 baseObjectsPart1,
```
```diff
@@ -307,6 +377,15 @@ public final class SqlSnapshots {
                   TypedClassifier partsByType = new TypedClassifier(kv.getValue());
                   ImmutableSet<DomainHistoryLite> baseObjects =
                       partsByType.getAllOf(DomainHistoryLite.class);
+                  if (nullableCompareStartTime != null) {
+                    Verify.verify(
+                        baseObjects.size() <= 1,
+                        "Found duplicate DomainHistoryLite object per domainHistoryId: "
+                            + kv.getKey());
+                    if (baseObjects.isEmpty()) {
+                      return;
+                    }
+                  }
                   Verify.verify(
                       baseObjects.size() == 1,
                       "Expecting one DomainHistoryLite object per domainHistoryId: "
```
```diff
@@ -328,12 +407,19 @@ public final class SqlSnapshots {
       Pipeline pipeline,
       Class<R> type,
       SerializableFunction<R, String> keyFunction,
-      Optional<String> snapshotId) {
+      Optional<String> snapshotId,
+      Optional<DateTime> compareStartTime) {
+    Read<R, R> queryObject;
+    if (compareStartTime.isPresent() && EppResource.class.isAssignableFrom(type)) {
+      queryObject =
+          buildEppResourceQueryWithTimeFilter(type, type, snapshotId, compareStartTime.get());
+    } else {
+      queryObject =
+          RegistryJpaIO.read(() -> CriteriaQueryBuilder.create(type).build())
+              .withSnapshot(snapshotId.orElse(null));
+    }
     return pipeline
-        .apply(
-            "SQL Load " + type.getSimpleName(),
-            RegistryJpaIO.read(() -> CriteriaQueryBuilder.create(type).build())
-                .withSnapshot(snapshotId.orElse(null)))
+        .apply("SQL Load " + type.getSimpleName(), queryObject)
         .apply(
             "Assign Key to " + type.getSimpleName(),
             MapElements.into(
```
```diff
@@ -346,13 +432,15 @@ public final class SqlSnapshots {
       Pipeline pipeline,
       String diffrentiator,
       String jplQuery,
+      ImmutableMap<String, Object> queryParameters,
       Class<R> type,
       SerializableFunction<R, String> keyFunction,
       Optional<String> snapshotId) {
     return pipeline
         .apply(
             "SQL Load " + type.getSimpleName() + " " + diffrentiator,
-            RegistryJpaIO.read(jplQuery, false, type::cast).withSnapshot(snapshotId.orElse(null)))
+            RegistryJpaIO.read(jplQuery, queryParameters, false, type::cast)
+                .withSnapshot(snapshotId.orElse(null)))
         .apply(
             "Assign Key to " + type.getSimpleName() + " " + diffrentiator,
             MapElements.into(
```
```diff
@@ -367,6 +455,71 @@ public final class SqlSnapshots {
     return r -> f2.apply(f1.apply(r));
   }
 
+  static <R, T> Read<R, T> buildEppResourceQueryWithTimeFilter(
+      Class<R> entityType,
+      Class<T> castOutputAsType,
+      Optional<String> snapshotId,
+      DateTime compareStartTime) {
+    String tableName = getJpaEntityName(entityType);
+    String jpql =
+        String.format("select c from %s c where :compareStartTime < updateTimestamp", tableName);
+    return RegistryJpaIO.read(
+            jpql,
+            ImmutableMap.of("compareStartTime", UpdateAutoTimestamp.create(compareStartTime)),
+            false,
+            (R x) -> castOutputAsType.cast(x))
+        .withSnapshot(snapshotId.orElse(null));
+  }
+
+  static PartitionedQuery buildPartitonedHistoryQuery(
+      Class<?> entityType, Optional<DateTime> compareStartTime) {
+    String tableName = getJpaEntityName(entityType);
+    Verify.verify(
+        !Strings.isNullOrEmpty(tableName), "Invalid entity type %s", entityType.getSimpleName());
+    long medianId =
+        getMedianIdForHistoryTable(tableName)
+            .orElseThrow(() -> new IllegalStateException("Not a valid database: no " + tableName));
+    String firstHalfQuery = String.format("select c from %s c where id <= :historyId", tableName);
+    String secondHalfQuery = String.format("select c from %s c where id > :historyId", tableName);
+    if (compareStartTime.isPresent()) {
+      String timeFilter = " and :compareStartTime < modificationTime";
+      firstHalfQuery += timeFilter;
+      secondHalfQuery += timeFilter;
+      return PartitionedQuery.createPartitionedQuery(
+          firstHalfQuery,
+          secondHalfQuery,
+          ImmutableMap.of("historyId", medianId, "compareStartTime", compareStartTime.get()));
+    } else {
+      return PartitionedQuery.createPartitionedQuery(
+          firstHalfQuery, secondHalfQuery, ImmutableMap.of("historyId", medianId));
+    }
+  }
+
+  private static String getJpaEntityName(Class entityType) {
+    Entity entityAnnotation = (Entity) entityType.getAnnotation(Entity.class);
+    checkState(
+        entityAnnotation != null, "Unexpected non-entity type %s", entityType.getSimpleName());
+    return Strings.isNullOrEmpty(entityAnnotation.name())
+        ? entityType.getSimpleName()
+        : entityAnnotation.name();
+  }
+
+  /** Contains two queries that partition the target table in two. */
+  @AutoValue
+  abstract static class PartitionedQuery {
+    abstract String firstHalfQuery();
+
+    abstract String secondHalfQuery();
+
+    abstract ImmutableMap<String, Object> parameters();
+
+    public static PartitionedQuery createPartitionedQuery(
+        String firstHalfQuery, String secondHalfQuery, ImmutableMap<String, Object> parameters) {
+      return new AutoValue_SqlSnapshots_PartitionedQuery(
+          firstHalfQuery, secondHalfQuery, parameters);
+    }
+  }
+
   /** Container that receives mixed-typed data and groups them by {@link Class}. */
   static class TypedClassifier {
     private final ImmutableSetMultimap<Class<?>, Object> classifiedEntities;
```
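A worked illustration of what `buildPartitonedHistoryQuery` assembles when a comparison start time is present, assuming the JPA entity name resolves to `DomainHistory` and using a hypothetical median id of 12345 (the real value comes from `getMedianIdForHistoryTable`):

```java
import com.google.common.collect.ImmutableMap;
import org.joda.time.DateTime;

class PartitionedQueryExample {
  public static void main(String[] args) {
    String tableName = "DomainHistory"; // assumed JPA entity name
    long medianId = 12345L;             // illustrative; normally queried from the DB
    DateTime compareStartTime = DateTime.parse("2021-12-01T00:00:00Z");

    // Reproduces the string assembly from buildPartitonedHistoryQuery above.
    String timeFilter = " and :compareStartTime < modificationTime";
    String firstHalf =
        String.format("select c from %s c where id <= :historyId", tableName) + timeFilter;
    String secondHalf =
        String.format("select c from %s c where id > :historyId", tableName) + timeFilter;
    ImmutableMap<String, Object> parameters =
        ImmutableMap.of("historyId", medianId, "compareStartTime", compareStartTime);

    System.out.println(firstHalf);  // ... where id <= :historyId and :compareStartTime < modificationTime
    System.out.println(secondHalf); // ... where id > :historyId and :compareStartTime < modificationTime
    System.out.println(parameters);
  }
}
```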
```diff
@@ -126,6 +126,9 @@ public class ValidateSqlPipeline {
         .getCoderRegistry()
         .registerCoderForClass(SqlEntity.class, SerializableCoder.of(Serializable.class));
 
+    Optional<DateTime> compareStartTime =
+        Optional.ofNullable(options.getComparisonStartTimestamp()).map(DateTime::parse);
+
     PCollectionTuple datastoreSnapshot =
         DatastoreSnapshots.loadDatastoreSnapshotByKind(
             pipeline,
@@ -135,11 +138,12 @@ public class ValidateSqlPipeline {
             // Increase by 1ms since we want to include commitLogs latestCommitLogTime but
             // this parameter is exclusive.
             latestCommitLogTime.plusMillis(1),
-            DatastoreSnapshots.ALL_DATASTORE_KINDS);
+            DatastoreSnapshots.ALL_DATASTORE_KINDS,
+            compareStartTime);
 
     PCollectionTuple cloudSqlSnapshot =
         SqlSnapshots.loadCloudSqlSnapshotByType(
-            pipeline, SqlSnapshots.ALL_SQL_ENTITIES, sqlSnapshotId);
+            pipeline, SqlSnapshots.ALL_SQL_ENTITIES, sqlSnapshotId, compareStartTime);
 
     verify(
         datastoreSnapshot.getAll().keySet().equals(cloudSqlSnapshot.getAll().keySet()),
```
```diff
@@ -16,7 +16,19 @@ package google.registry.beam.comparedb;
 
 import google.registry.beam.common.RegistryPipelineOptions;
 import google.registry.model.annotations.DeleteAfterMigration;
+import javax.annotation.Nullable;
+import org.apache.beam.sdk.options.Description;
 
 /** BEAM pipeline options for {@link ValidateSqlPipeline}. */
 @DeleteAfterMigration
-public interface ValidateSqlPipelineOptions extends RegistryPipelineOptions {}
+public interface ValidateSqlPipelineOptions extends RegistryPipelineOptions {
+
+  @Description(
+      "For history entries and EPP resources, only those modified strictly after this time are "
+          + "included in comparison. Value is in ISO8601 format. "
+          + "Other entity types are not affected.")
+  @Nullable
+  String getComparisonStartTimestamp();
+
+  void setComparisonStartTimestamp(String comparisonStartTimestamp);
+}
```
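Beam derives the command-line flag from the getter name, so the new option would be passed as `--comparisonStartTimestamp=<ISO8601>`. A minimal sketch of parsing it the way the pipeline does (the timestamp value is illustrative):

```java
import java.util.Optional;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.joda.time.DateTime;

class OptionsParsingExample {
  public static void main(String[] args) {
    ValidateSqlPipelineOptions options =
        PipelineOptionsFactory.fromArgs("--comparisonStartTimestamp=2021-12-01T00:00:00.000Z")
            .withValidation()
            .as(ValidateSqlPipelineOptions.class);
    // Mirrors the parsing added to ValidateSqlPipeline.
    Optional<DateTime> compareStartTime =
        Optional.ofNullable(options.getComparisonStartTimestamp()).map(DateTime::parse);
    System.out.println(compareStartTime); // Optional[2021-12-01T00:00:00.000Z]
  }
}
```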
```diff
@@ -33,6 +33,7 @@ import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.ImmutableSortedMap;
 import dagger.Module;
 import dagger.Provides;
+import google.registry.persistence.transaction.JpaTransactionManager;
 import google.registry.util.TaskQueueUtils;
 import google.registry.util.YamlUtils;
 import java.lang.annotation.Documented;
@@ -392,19 +393,26 @@ public final class RegistryConfig {
 
     @Provides
     @Config("cloudSqlJdbcUrl")
-    public static String providesCloudSqlJdbcUrl(RegistryConfigSettings config) {
+    public static String provideCloudSqlJdbcUrl(RegistryConfigSettings config) {
       return config.cloudSql.jdbcUrl;
     }
 
     @Provides
     @Config("cloudSqlInstanceConnectionName")
-    public static String providesCloudSqlInstanceConnectionName(RegistryConfigSettings config) {
+    public static String provideCloudSqlInstanceConnectionName(RegistryConfigSettings config) {
       return config.cloudSql.instanceConnectionName;
     }
 
+    @Provides
+    @Config("cloudSqlReplicaInstanceConnectionName")
+    public static Optional<String> provideCloudSqlReplicaInstanceConnectionName(
+        RegistryConfigSettings config) {
+      return Optional.ofNullable(config.cloudSql.replicaInstanceConnectionName);
+    }
+
     @Provides
     @Config("cloudSqlDbInstanceName")
-    public static String providesCloudSqlDbInstance(RegistryConfigSettings config) {
+    public static String provideCloudSqlDbInstance(RegistryConfigSettings config) {
       // Format of instanceConnectionName: project-id:region:instance-name
       int lastColonIndex = config.cloudSql.instanceConnectionName.lastIndexOf(':');
       return config.cloudSql.instanceConnectionName.substring(lastColonIndex + 1);
```
```diff
@@ -1524,6 +1532,31 @@ public final class RegistryConfig {
     return CONFIG_SETTINGS.get().hibernate.hikariIdleTimeout;
   }
 
+  /**
+   * JDBC-specific: driver default batch size is 0, which means that every INSERT statement will be
+   * sent to the database individually. Batching allows us to group together multiple inserts into
+   * one single INSERT statement which can dramatically increase speed in situations with many
+   * inserts.
+   *
+   * <p>Hibernate docs, i.e.
+   * https://docs.jboss.org/hibernate/orm/5.6/userguide/html_single/Hibernate_User_Guide.html,
+   * recommend between 10 and 50.
+   */
+  public static String getHibernateJdbcBatchSize() {
+    return CONFIG_SETTINGS.get().hibernate.jdbcBatchSize;
+  }
+
+  /**
+   * Returns the JDBC fetch size.
+   *
+   * <p>Postgresql-specific: driver default fetch size is 0, which disables streaming result sets.
+   * Here we set a small default geared toward Nomulus server transactions. Large queries can
+   * override the defaults using {@link JpaTransactionManager#setQueryFetchSize}.
+   */
+  public static String getHibernateJdbcFetchSize() {
+    return CONFIG_SETTINGS.get().hibernate.jdbcFetchSize;
+  }
+
   /** Returns the roid suffix to be used for the roids of all contacts and hosts. */
   public static String getContactAndHostRoidSuffix() {
     return CONFIG_SETTINGS.get().registryPolicy.contactAndHostRoidSuffix;
```
```diff
@@ -120,6 +120,8 @@ public class RegistryConfigSettings {
     public String hikariMinimumIdle;
     public String hikariMaximumPoolSize;
     public String hikariIdleTimeout;
+    public String jdbcBatchSize;
+    public String jdbcFetchSize;
   }
 
   /** Configuration for Cloud SQL. */
@@ -128,6 +130,7 @@ public class RegistryConfigSettings {
     // TODO(05012021): remove username field after it is removed from all yaml files.
     public String username;
     public String instanceConnectionName;
+    public String replicaInstanceConnectionName;
   }
 
   /** Configuration for Apache Beam (Cloud Dataflow). */
```
```diff
@@ -221,6 +221,17 @@ hibernate:
   hikariMinimumIdle: 1
   hikariMaximumPoolSize: 10
   hikariIdleTimeout: 300000
+  # The batch size is basically the number of insertions / updates in a single
+  # transaction that will be batched together into one INSERT/UPDATE statement.
+  # A larger batch size is useful when inserting or updating many entities in a
+  # single transaction. Hibernate docs
+  # (https://docs.jboss.org/hibernate/orm/5.6/userguide/html_single/Hibernate_User_Guide.html)
+  # recommend between 10 and 50.
+  jdbcBatchSize: 50
+  # The fetch size is the number of entities retrieved at a time from the
+  # database cursor. Here we set a small default geared toward Nomulus server
+  # transactions. Large queries can override the defaults on a per-query basis.
+  jdbcFetchSize: 20
 
 cloudSql:
   # jdbc url for the Cloud SQL database.
```
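Conceptually, with `jdbcBatchSize: 50`, a transaction that persists many rows is flushed to Postgres in batches of 50 statements rather than one round trip per row. A minimal JPA sketch of the standard batching idiom (the entity list and manager are assumptions, not code from this repository):

```java
import java.util.List;
import javax.persistence.EntityManager;

class BatchInsertExample {
  // Illustrative batching idiom: flushing and clearing on batch boundaries lets
  // Hibernate emit batched INSERTs and keeps the persistence context small.
  static <T> void insertInBatches(EntityManager entityManager, List<T> entities) {
    int batchSize = 50; // matches jdbcBatchSize above
    for (int i = 0; i < entities.size(); i++) {
      entityManager.persist(entities.get(i));
      if ((i + 1) % batchSize == 0) {
        entityManager.flush();
        entityManager.clear();
      }
    }
  }
}
```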
```diff
@@ -231,6 +242,10 @@ cloudSql:
   jdbcUrl: jdbc:postgresql://localhost
   # This name is used by Cloud SQL when connecting to the database.
   instanceConnectionName: project-id:region:instance-id
+  # If non-null, we will use this instance for certain read-only actions or
+  # pipelines, e.g. RDE, in order to offload some work from the primary
+  # instance. Expect any write actions on this instance to fail.
+  replicaInstanceConnectionName: null
 
 cloudDns:
   # Set both properties to null in Production.
```
```diff
@@ -349,6 +349,15 @@
     <target>backend</target>
   </cron>
 
+  <cron>
+    <url><![CDATA[/_dr/cron/replicateToDatastore]]></url>
+    <description>
+      Replays recent transactions from SQL to the Datastore secondary backend.
+    </description>
+    <schedule>every 3 minutes</schedule>
+    <target>backend</target>
+  </cron>
+
   <cron>
     <url><![CDATA[/_dr/task/wipeOutContactHistoryPii]]></url>
     <description>
```
```diff
@@ -26,7 +26,6 @@ import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSortedSet;
 import com.google.common.collect.Iterables;
-import com.google.common.collect.Streams;
 import com.google.common.flogger.FluentLogger;
 import com.google.common.net.MediaType;
 import google.registry.config.RegistryConfig.Config;
@@ -143,7 +142,7 @@ public class ExportPremiumTermsAction implements Runnable {
         PremiumListDao.getLatestRevision(premiumListName).isPresent(),
         "Could not load premium list for " + tld);
     SortedSet<String> premiumTerms =
-        Streams.stream(PremiumListDao.loadAllPremiumEntries(premiumListName))
+        PremiumListDao.loadAllPremiumEntries(premiumListName).stream()
             .map(PremiumEntry::toString)
             .collect(ImmutableSortedSet.toImmutableSortedSet(String::compareTo));
```
```diff
@@ -15,6 +15,7 @@
 package google.registry.model;
 
 import com.google.common.collect.ImmutableSet;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.billing.BillingEvent;
 import google.registry.model.common.Cursor;
 import google.registry.model.common.EntityGroupRoot;
@@ -45,6 +46,7 @@ import google.registry.model.server.ServerSecret;
 import google.registry.model.tld.Registry;
 
 /** Sets of classes of the Objectify-registered entities in use throughout the model. */
+@DeleteAfterMigration
 public final class EntityClasses {
 
   /** Set of entity classes. */
```

```diff
@@ -20,6 +20,7 @@ import com.google.appengine.api.datastore.DatastoreServiceFactory;
 import com.google.common.annotations.VisibleForTesting;
 import google.registry.beam.common.RegistryPipelineWorkerInitializer;
 import google.registry.config.RegistryEnvironment;
+import google.registry.model.annotations.DeleteAfterMigration;
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
@@ -28,6 +29,7 @@ import java.util.concurrent.atomic.AtomicLong;
  * <p>In non-test, non-beam environments the Id is generated by Datastore, otherwise it's from an
  * atomic long number that's incremented every time this method is called.
  */
+@DeleteAfterMigration
 public final class IdService {
 
   /**
```
```diff
@@ -19,12 +19,14 @@ import static com.google.common.base.Predicates.subtypeOf;
 import static java.util.stream.Collectors.joining;
 
 import com.google.common.collect.Ordering;
+import google.registry.model.annotations.DeleteAfterMigration;
 import java.util.ArrayDeque;
 import java.util.Queue;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
 /** Utility methods for getting the version of the model schema from the model code. */
+@DeleteAfterMigration
 public final class SchemaVersion {
 
   /**
```

```diff
@@ -20,11 +20,13 @@ import com.googlecode.objectify.Key;
 import com.googlecode.objectify.annotation.Id;
 import com.googlecode.objectify.annotation.Parent;
 import google.registry.model.ImmutableObject;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.annotations.InCrossTld;
 import javax.persistence.MappedSuperclass;
 import javax.persistence.Transient;
 
 /** A singleton entity in Datastore. */
+@DeleteAfterMigration
 @MappedSuperclass
 @InCrossTld
 public abstract class CrossTldSingleton extends ImmutableObject {
```
```diff
@@ -26,6 +26,7 @@ import com.google.common.collect.ImmutableMultimap;
 import com.google.common.collect.ImmutableSortedMap;
 import com.google.common.flogger.FluentLogger;
 import google.registry.config.RegistryEnvironment;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.common.TimedTransitionProperty.TimedTransition;
 import google.registry.model.replay.SqlOnlyEntity;
 import java.time.Duration;
@@ -39,6 +40,7 @@ import org.joda.time.DateTime;
  * <p>The entity is stored in SQL throughout the entire migration so as to have a single point of
  * access.
  */
+@DeleteAfterMigration
 @Entity
 public class DatabaseMigrationStateSchedule extends CrossTldSingleton implements SqlOnlyEntity {
```

```diff
@@ -19,6 +19,7 @@ import com.googlecode.objectify.Key;
 import com.googlecode.objectify.annotation.Entity;
 import com.googlecode.objectify.annotation.Id;
 import google.registry.model.BackupGroupRoot;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.replay.DatastoreOnlyEntity;
 import javax.annotation.Nullable;
 
@@ -37,6 +38,7 @@ import javax.annotation.Nullable;
  * entity group for the single namespace where global data applicable for all TLDs lived.
 */
 @Entity
+@DeleteAfterMigration
 public class EntityGroupRoot extends BackupGroupRoot implements DatastoreOnlyEntity {
 
   @SuppressWarnings("unused")
```
```diff
@@ -24,12 +24,15 @@ import com.googlecode.objectify.annotation.Index;
 import com.googlecode.objectify.annotation.Parent;
 import google.registry.model.BackupGroupRoot;
 import google.registry.model.EppResource;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.annotations.ReportedOn;
 import google.registry.model.replay.DatastoreOnlyEntity;
+import google.registry.persistence.VKey;
 
 /** An index that allows for quick enumeration of all EppResource entities (e.g. via map reduce). */
 @ReportedOn
 @Entity
+@DeleteAfterMigration
 public class EppResourceIndex extends BackupGroupRoot implements DatastoreOnlyEntity {
 
   @Id String id;
@@ -64,8 +67,9 @@ public class EppResourceIndex extends BackupGroupRoot implements DatastoreOnlyEn
     EppResourceIndex instance = instantiate(EppResourceIndex.class);
     instance.reference = resourceKey;
     instance.kind = resourceKey.getKind();
-    // TODO(b/207368050): figure out if this value has ever been used other than test cases
-    instance.id = resourceKey.getString(); // creates a web-safe key string
+    // creates a web-safe key string, this value is never used
+    // TODO(b/211785379): remove unused id
+    instance.id = VKey.from(resourceKey).stringify();
     instance.bucket = bucket;
     return instance;
   }
```

```diff
@@ -23,12 +23,14 @@ import com.googlecode.objectify.annotation.Entity;
 import com.googlecode.objectify.annotation.Id;
 import google.registry.model.EppResource;
 import google.registry.model.ImmutableObject;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.annotations.VirtualEntity;
 import google.registry.model.replay.DatastoreOnlyEntity;
 
 /** A virtual entity to represent buckets to which EppResourceIndex objects are randomly added. */
 @Entity
 @VirtualEntity
+@DeleteAfterMigration
 public class EppResourceIndexBucket extends ImmutableObject implements DatastoreOnlyEntity {
 
   @SuppressWarnings("unused")
```
```diff
@@ -43,6 +43,7 @@ import com.googlecode.objectify.annotation.Index;
 import google.registry.config.RegistryConfig;
 import google.registry.model.BackupGroupRoot;
 import google.registry.model.EppResource;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.annotations.ReportedOn;
 import google.registry.model.contact.ContactResource;
 import google.registry.model.domain.DomainBase;
@@ -65,6 +66,7 @@ import org.joda.time.Duration;
  * the foreign key string. The instance is never deleted, but it is updated if a newer entity
 * becomes the active entity.
 */
+@DeleteAfterMigration
 public abstract class ForeignKeyIndex<E extends EppResource> extends BackupGroupRoot {
 
   /** The {@link ForeignKeyIndex} type for {@link ContactResource} entities. */
```

```diff
@@ -23,6 +23,7 @@ import com.googlecode.objectify.Key;
 import com.googlecode.objectify.Result;
 import com.googlecode.objectify.cmd.DeleteType;
 import com.googlecode.objectify.cmd.Deleter;
+import google.registry.model.annotations.DeleteAfterMigration;
 import java.util.Arrays;
 import java.util.stream.Stream;
 
@@ -30,6 +31,7 @@ import java.util.stream.Stream;
 * A Deleter that forwards to {@code auditedOfy().delete()}, but can be augmented via subclassing to
 * do custom processing on the keys to be deleted prior to their deletion.
 */
+@DeleteAfterMigration
 abstract class AugmentedDeleter implements Deleter {
   private final Deleter delegate = ofy().delete();
```
```diff
@@ -21,13 +21,15 @@ import com.google.common.collect.ImmutableList;
 import com.googlecode.objectify.Key;
 import com.googlecode.objectify.Result;
 import com.googlecode.objectify.cmd.Saver;
+import google.registry.model.annotations.DeleteAfterMigration;
 import java.util.Arrays;
 import java.util.Map;
 
 /**
- * A Saver that forwards to {@code ofy().save()}, but can be augmented via subclassing to
- * do custom processing on the entities to be saved prior to their saving.
+ * A Saver that forwards to {@code ofy().save()}, but can be augmented via subclassing to do custom
+ * processing on the entities to be saved prior to their saving.
 */
+@DeleteAfterMigration
 abstract class AugmentedSaver implements Saver {
   private final Saver delegate = ofy().save();
```

```diff
@@ -31,6 +31,7 @@ import com.googlecode.objectify.annotation.Id;
 import google.registry.config.RegistryConfig;
 import google.registry.model.Buildable;
 import google.registry.model.ImmutableObject;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.annotations.NotBackedUp;
 import google.registry.model.annotations.NotBackedUp.Reason;
 import google.registry.model.replay.DatastoreOnlyEntity;
@@ -51,6 +52,7 @@ import org.joda.time.DateTime;
 */
 @Entity
 @NotBackedUp(reason = Reason.COMMIT_LOGS)
+@DeleteAfterMigration
 public class CommitLogBucket extends ImmutableObject implements Buildable, DatastoreOnlyEntity {
 
   /**
```
```diff
@@ -25,6 +25,7 @@ import com.googlecode.objectify.annotation.Entity;
 import com.googlecode.objectify.annotation.Id;
 import com.googlecode.objectify.annotation.Parent;
 import google.registry.model.ImmutableObject;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.annotations.NotBackedUp;
 import google.registry.model.annotations.NotBackedUp.Reason;
 import google.registry.model.replay.DatastoreOnlyEntity;
@@ -45,6 +46,7 @@ import org.joda.time.DateTime;
 */
 @Entity
 @NotBackedUp(reason = Reason.COMMIT_LOGS)
+@DeleteAfterMigration
 public class CommitLogCheckpoint extends ImmutableObject implements DatastoreOnlyEntity {
 
   /** Shared singleton parent entity for commit log checkpoints. */
```

```diff
@@ -21,6 +21,7 @@ import com.googlecode.objectify.Key;
 import com.googlecode.objectify.annotation.Entity;
 import com.googlecode.objectify.annotation.Id;
 import google.registry.model.ImmutableObject;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.annotations.NotBackedUp;
 import google.registry.model.annotations.NotBackedUp.Reason;
 import google.registry.model.replay.DatastoreOnlyEntity;
@@ -29,6 +30,7 @@ import org.joda.time.DateTime;
 /** Singleton parent entity for all commit log checkpoints. */
 @Entity
 @NotBackedUp(reason = Reason.COMMIT_LOGS)
+@DeleteAfterMigration
 public class CommitLogCheckpointRoot extends ImmutableObject implements DatastoreOnlyEntity {
 
   public static final long SINGLETON_ID = 1; // There is always exactly one of these.
```
```diff
@@ -23,6 +23,7 @@ import com.googlecode.objectify.annotation.Entity;
 import com.googlecode.objectify.annotation.Id;
 import com.googlecode.objectify.annotation.Parent;
 import google.registry.model.ImmutableObject;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.annotations.NotBackedUp;
 import google.registry.model.annotations.NotBackedUp.Reason;
 import google.registry.model.replay.DatastoreOnlyEntity;
@@ -39,6 +40,7 @@ import org.joda.time.DateTime;
 */
 @Entity
 @NotBackedUp(reason = Reason.COMMIT_LOGS)
+@DeleteAfterMigration
 public class CommitLogManifest extends ImmutableObject implements DatastoreOnlyEntity {
 
   /** Commit log manifests are parented on a random bucket. */
```

```diff
@@ -27,6 +27,7 @@ import com.googlecode.objectify.annotation.Entity;
 import com.googlecode.objectify.annotation.Id;
 import com.googlecode.objectify.annotation.Parent;
 import google.registry.model.ImmutableObject;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.annotations.NotBackedUp;
 import google.registry.model.annotations.NotBackedUp.Reason;
 import google.registry.model.replay.DatastoreOnlyEntity;
@@ -34,6 +35,7 @@ import google.registry.model.replay.DatastoreOnlyEntity;
 /** Representation of a saved entity in a {@link CommitLogManifest} (not deletes). */
 @Entity
 @NotBackedUp(reason = Reason.COMMIT_LOGS)
+@DeleteAfterMigration
 public class CommitLogMutation extends ImmutableObject implements DatastoreOnlyEntity {
 
   /** The manifest this belongs to. */
```
```diff
@@ -30,6 +30,7 @@ import com.google.common.collect.ImmutableSet;
 import com.googlecode.objectify.Key;
 import google.registry.model.BackupGroupRoot;
 import google.registry.model.ImmutableObject;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.util.Clock;
 import java.util.HashSet;
 import java.util.Map;
@@ -39,6 +40,7 @@ import java.util.function.Supplier;
 import org.joda.time.DateTime;
 
 /** Wrapper for {@link Supplier} that associates a time with each attempt. */
+@DeleteAfterMigration
 class CommitLoggedWork<R> implements Runnable {
 
   private final Supplier<R> work;
```

```diff
@@ -33,6 +33,7 @@ import com.googlecode.objectify.Key;
 import com.googlecode.objectify.Result;
 import com.googlecode.objectify.cmd.Query;
 import google.registry.model.ImmutableObject;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.annotations.InCrossTld;
 import google.registry.model.contact.ContactHistory;
 import google.registry.model.domain.DomainHistory;
@@ -55,6 +56,7 @@ import javax.persistence.NonUniqueResultException;
 import org.joda.time.DateTime;
 
 /** Datastore implementation of {@link TransactionManager}. */
+@DeleteAfterMigration
 public class DatastoreTransactionManager implements TransactionManager {
 
   private Ofy injectedOfy;
```
```diff
@@ -16,6 +16,7 @@ package google.registry.model.ofy;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableMap;
+import google.registry.model.annotations.DeleteAfterMigration;
 
 /**
  * Contains the mapping from class names to SQL-replay-write priorities.
@@ -26,6 +27,7 @@ import com.google.common.collect.ImmutableMap;
 * values represent an earlier write (and later delete). Higher-valued classes can have foreign keys
 * on lower-valued classes, but not vice versa.
 */
+@DeleteAfterMigration
 public class EntityWritePriorities {
 
   /**
```

```diff
@@ -35,6 +35,7 @@ import google.registry.config.RegistryEnvironment;
 import google.registry.model.Buildable;
 import google.registry.model.EntityClasses;
 import google.registry.model.ImmutableObject;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.translators.BloomFilterOfStringTranslatorFactory;
 import google.registry.model.translators.CidrAddressBlockTranslatorFactory;
 import google.registry.model.translators.CommitLogRevisionsTranslatorFactory;
@@ -52,6 +53,7 @@ import google.registry.model.translators.VKeyTranslatorFactory;
 * objects. The class contains a static initializer to call factory().register(...) on all
 * persistable objects in this package.
 */
+@DeleteAfterMigration
 public class ObjectifyService {
 
   /** A singleton instance of our Ofy wrapper. */
```
```diff
@@ -37,6 +37,7 @@ import com.googlecode.objectify.ObjectifyFactory;
 import com.googlecode.objectify.cmd.Deleter;
 import com.googlecode.objectify.cmd.Loader;
 import com.googlecode.objectify.cmd.Saver;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.annotations.NotBackedUp;
 import google.registry.model.annotations.VirtualEntity;
 import google.registry.model.ofy.ReadOnlyWork.KillTransactionException;
@@ -59,6 +60,7 @@ import org.joda.time.Duration;
 * simpler to wrap {@link Objectify} rather than extend it because this way we can remove some
 * methods that we don't really want exposed and add some shortcuts.
 */
+@DeleteAfterMigration
 public class Ofy {
 
   private static final FluentLogger logger = FluentLogger.forEnclosingClass();
```

```diff
@@ -14,6 +14,7 @@
 
 package google.registry.model.ofy;
 
+import google.registry.model.annotations.DeleteAfterMigration;
 import java.io.IOException;
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
@@ -23,6 +24,7 @@ import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 
 /** A filter that statically registers types with Objectify. */
+@DeleteAfterMigration
 public class OfyFilter implements Filter {
 
   @Override
```
```diff
@@ -14,10 +14,12 @@
 
 package google.registry.model.ofy;
 
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.util.Clock;
 import java.util.function.Supplier;
 
 /** Wrapper for {@link Supplier} that disallows mutations and fails the transaction at the end. */
+@DeleteAfterMigration
 class ReadOnlyWork<R> extends CommitLoggedWork<R> {
 
   ReadOnlyWork(Supplier<R> work, Clock clock) {
```

```diff
@@ -22,6 +22,7 @@ import com.google.common.collect.ImmutableMap;
 import com.googlecode.objectify.Key;
 import google.registry.config.RegistryEnvironment;
 import google.registry.model.UpdateAutoTimestamp;
+import google.registry.model.annotations.DeleteAfterMigration;
 import google.registry.model.replay.DatastoreEntity;
 import google.registry.model.replay.ReplaySpecializer;
 import google.registry.persistence.VKey;
@@ -34,6 +35,7 @@ import java.util.concurrent.ConcurrentLinkedQueue;
 *
 * <p>This code is to be removed when the actual replay cron job is implemented.
 */
+@DeleteAfterMigration
 public class ReplayQueue {
 
   static ConcurrentLinkedQueue<ImmutableMap<Key<?>, Object>> queue =
```
```diff
@@ -28,6 +28,7 @@ import com.google.appengine.api.datastore.Query;
 import com.google.appengine.api.datastore.Transaction;
 import com.google.appengine.api.datastore.TransactionOptions;
 import com.google.common.collect.ImmutableList;
+import google.registry.model.annotations.DeleteAfterMigration;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -35,6 +36,7 @@ import java.util.Map;
 import java.util.concurrent.Future;
 
 /** A proxy for {@link AsyncDatastoreService} that exposes call counts. */
+@DeleteAfterMigration
 public class RequestCapturingAsyncDatastoreService implements AsyncDatastoreService {
 
   private final AsyncDatastoreService delegate;
```

```diff
@@ -18,8 +18,10 @@ import com.google.common.collect.ImmutableSet;
 import com.googlecode.objectify.Key;
 import com.googlecode.objectify.ObjectifyFactory;
 import com.googlecode.objectify.impl.ObjectifyImpl;
+import google.registry.model.annotations.DeleteAfterMigration;
 
 /** Registry-specific Objectify subclass that exposes the keys used in the current session. */
+@DeleteAfterMigration
 public class SessionKeyExposingObjectify extends ObjectifyImpl<SessionKeyExposingObjectify> {
 
   public SessionKeyExposingObjectify(ObjectifyFactory factory) {
```
@@ -17,6 +17,7 @@ package google.registry.model.ofy;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Objectify;
|
||||
import google.registry.model.BackupGroupRoot;
|
||||
import google.registry.model.annotations.DeleteAfterMigration;
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
import org.joda.time.DateTime;
|
||||
@@ -25,6 +26,7 @@ import org.joda.time.DateTime;
|
||||
* Exception when trying to write to Datastore with a timestamp that is inconsistent with a partial
|
||||
* ordering on transactions that touch the same entities.
|
||||
*/
|
||||
@DeleteAfterMigration
|
||||
class TimestampInversionException extends RuntimeException {
|
||||
|
||||
static String getFileAndLine(StackTraceElement callsite) {
|
||||
|
||||
@@ -26,10 +26,12 @@ import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.googlecode.objectify.Key;
|
||||
import google.registry.model.annotations.DeleteAfterMigration;
|
||||
import java.util.Map;
|
||||
import org.joda.time.DateTime;
|
||||
|
||||
/** Metadata for an {@link Ofy} transaction that saves commit logs. */
|
||||
@DeleteAfterMigration
|
||||
public class TransactionInfo {
|
||||
|
||||
@VisibleForTesting
|
||||
|
||||
@@ -14,9 +14,11 @@

package google.registry.model.replay;

import google.registry.model.annotations.DeleteAfterMigration;
import java.util.Optional;

/** An entity that has the same Java object representation in SQL and Datastore. */
@DeleteAfterMigration
public interface DatastoreAndSqlEntity extends DatastoreEntity, SqlEntity {

  @Override

@@ -14,6 +14,7 @@

package google.registry.model.replay;

import google.registry.model.annotations.DeleteAfterMigration;
import java.util.Optional;

/**
@@ -24,6 +25,7 @@ import java.util.Optional;
 * transactions and data into the secondary SQL store during the first, Datastore-primary, phase of
 * the migration.
 */
@DeleteAfterMigration
public interface DatastoreEntity {

  Optional<SqlEntity> toSqlEntity();

@@ -14,9 +14,11 @@

package google.registry.model.replay;

import google.registry.model.annotations.DeleteAfterMigration;
import java.util.Optional;

/** An entity that is only stored in Datastore, that should not be replayed to SQL. */
@DeleteAfterMigration
public interface DatastoreOnlyEntity extends DatastoreEntity {
  @Override
  default Optional<SqlEntity> toSqlEntity() {

@@ -22,9 +22,11 @@ import com.googlecode.objectify.Key;
import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.Id;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;

/** Datastore entity to keep track of the last SQL transaction imported into the datastore. */
@Entity
@DeleteAfterMigration
public class LastSqlTransaction extends ImmutableObject implements DatastoreOnlyEntity {

  /** The key for this singleton. */

@@ -14,6 +14,7 @@

package google.registry.model.replay;

import google.registry.model.annotations.DeleteAfterMigration;
import java.util.Optional;

/**
@@ -21,6 +22,7 @@ import java.util.Optional;
 *
 * <p>We expect that this is a result of the entity being dually-written.
 */
@DeleteAfterMigration
public interface NonReplicatedEntity extends DatastoreEntity, SqlEntity {

  @Override

@@ -14,6 +14,7 @@

package google.registry.model.replay;

import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.persistence.VKey;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
@@ -25,6 +26,7 @@ import java.lang.reflect.Method;
 * not directly present in the other database. This class allows us to do that by using reflection
 * to invoke special class methods if they are present.
 */
@DeleteAfterMigration
public class ReplaySpecializer {

  public static void beforeSqlDelete(VKey<?> key) {

@@ -27,6 +27,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.flogger.FluentLogger;
import google.registry.model.UpdateAutoTimestamp;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.common.DatabaseMigrationStateSchedule;
import google.registry.model.common.DatabaseMigrationStateSchedule.MigrationState;
import google.registry.model.common.DatabaseMigrationStateSchedule.ReplayDirection;
@@ -53,6 +54,7 @@ import org.joda.time.Duration;
    automaticallyPrintOk = true,
    auth = Auth.AUTH_INTERNAL_OR_ADMIN)
@VisibleForTesting
@DeleteAfterMigration
public class ReplicateToDatastoreAction implements Runnable {
  public static final String PATH = "/_dr/cron/replicateToDatastore";
  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

@@ -16,6 +16,7 @@ package google.registry.model.replay;

import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;

import google.registry.model.annotations.DeleteAfterMigration;
import java.util.Optional;

/**
@@ -25,6 +26,7 @@ import java.util.Optional;
 * <p>This will be used when replaying SQL transactions into Datastore, during the second,
 * SQL-primary, phase of the migration from Datastore to SQL.
 */
@DeleteAfterMigration
public interface SqlEntity {

  Optional<DatastoreEntity> toDatastoreEntity();

@@ -14,9 +14,11 @@

package google.registry.model.replay;

import google.registry.model.annotations.DeleteAfterMigration;
import java.util.Optional;

/** An entity that is only stored in SQL, that should not be replayed to Datastore. */
@DeleteAfterMigration
public interface SqlOnlyEntity extends SqlEntity {
  @Override
  default Optional<DatastoreEntity> toDatastoreEntity() {

@@ -17,12 +17,14 @@ package google.registry.model.replay;
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
import static google.registry.util.DateTimeUtils.START_OF_TIME;

import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.common.CrossTldSingleton;
import javax.persistence.Column;
import javax.persistence.Entity;
import org.joda.time.DateTime;

@Entity
@DeleteAfterMigration
public class SqlReplayCheckpoint extends CrossTldSingleton implements SqlOnlyEntity {

  @Column(nullable = false)
@@ -21,7 +21,6 @@ import static com.google.common.hash.Funnels.stringFunnel;

import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Streams;
import com.google.common.hash.BloomFilter;
import google.registry.model.Buildable;
import google.registry.model.ImmutableObject;
@@ -86,9 +85,8 @@ public final class PremiumList extends BaseDomainLabelList<BigDecimal, PremiumEn
   */
  public synchronized ImmutableMap<String, BigDecimal> getLabelsToPrices() {
    if (labelsToPrices == null) {
      Iterable<PremiumEntry> entries = PremiumListDao.loadAllPremiumEntries(name);
      labelsToPrices =
          Streams.stream(entries)
          PremiumListDao.loadAllPremiumEntries(name).stream()
              .collect(
                  toImmutableMap(
                      PremiumEntry::getDomainLabel,

@@ -28,8 +28,8 @@ import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.CacheLoader.InvalidCacheLoadException;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Streams;
import google.registry.model.tld.label.PremiumList.PremiumEntry;
import google.registry.util.NonFinalForTesting;
import java.math.BigDecimal;
@@ -56,8 +56,7 @@ public class PremiumListDao {
 * <p>This is cached for a shorter duration because we need to periodically reload this entity to
 * check if a new revision has been published, and if so, then use that.
 *
 * <p>We also cache the absence of premium lists with a given name to avoid unnecessary pointless
 * lookups. Note that this cache is only applicable to PremiumList objects stored in SQL.
 * <p>We also cache the absence of premium lists with a given name to avoid pointless lookups.
 */
@NonFinalForTesting
static LoadingCache<String, Optional<PremiumList>> premiumListCache =
@@ -170,11 +169,10 @@ public class PremiumListDao {

  if (!isNullOrEmpty(premiumList.getLabelsToPrices())) {
    ImmutableSet.Builder<PremiumEntry> entries = new ImmutableSet.Builder<>();
    premiumList.getLabelsToPrices().entrySet().stream()
    premiumList
        .getLabelsToPrices()
        .forEach(
            entry ->
                entries.add(
                    PremiumEntry.create(revisionId, entry.getValue(), entry.getKey())));
            (key, value) -> entries.add(PremiumEntry.create(revisionId, value, key)));
    jpaTm().insertAll(entries.build());
  }
});
@@ -217,7 +215,7 @@ public class PremiumListDao {
 *
 * <p>This is an expensive operation and should only be used when the entire list is required.
 */
public static Iterable<PremiumEntry> loadPremiumEntries(PremiumList premiumList) {
public static List<PremiumEntry> loadPremiumEntries(PremiumList premiumList) {
  return jpaTm()
      .transact(
          () ->
@@ -254,15 +252,14 @@ public class PremiumListDao {
 *
 * <p>This is an expensive operation and should only be used when the entire list is required.
 */
public static Iterable<PremiumEntry> loadAllPremiumEntries(String premiumListName) {
public static ImmutableList<PremiumEntry> loadAllPremiumEntries(String premiumListName) {
  PremiumList premiumList =
      getLatestRevision(premiumListName)
          .orElseThrow(
              () ->
                  new IllegalArgumentException(
                      String.format("No premium list with name %s.", premiumListName)));
  Iterable<PremiumEntry> entries = loadPremiumEntries(premiumList);
  return Streams.stream(entries)
  return loadPremiumEntries(premiumList).stream()
      .map(
          premiumEntry ->
              new PremiumEntry.Builder()
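
The tightened return types above (List and ImmutableList instead of Iterable) are what let callers stream entries directly, without Guava's Streams.stream() adapter. A minimal sketch of the resulting call pattern, assuming a premium list named "example-list" exists (the name is hypothetical, and getValue() is assumed to return the BigDecimal price):

    // Sketch only; toImmutableMap is the static import from com.google.common.collect.ImmutableMap.
    ImmutableMap<String, BigDecimal> labelsToPrices =
        PremiumListDao.loadAllPremiumEntries("example-list").stream()  // "example-list" is hypothetical
            .collect(toImmutableMap(PremiumEntry::getDomainLabel, PremiumEntry::getValue));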
@@ -23,6 +23,7 @@ import static google.registry.util.DateTimeUtils.START_OF_TIME;
import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.Ordering;
import com.googlecode.objectify.Key;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.persistence.transaction.Transaction;
import org.joda.time.DateTime;
@@ -31,11 +32,12 @@ import org.joda.time.DateTime;
 * Objectify translator for {@code ImmutableSortedMap<DateTime, Key<CommitLogManifest>>} fields.
 *
 * <p>This translator is responsible for doing three things:
 *
 * <ol>
 *   <li>Translating the data into two lists of {@code Date} and {@code Key} objects, in a manner
 *       similar to {@code @Mapify}.
 *   <li>Inserting a key to the transaction's {@link CommitLogManifest} on save.
 *   <li>Truncating the map to include only the last key per day for the last 30 days.
 *   <li>Translating the data into two lists of {@code Date} and {@code Key} objects, in a manner
 *       similar to {@code @Mapify}.
 *   <li>Inserting a key to the transaction's {@link CommitLogManifest} on save.
 *   <li>Truncating the map to include only the last key per day for the last 30 days.
 * </ol>
 *
 * <p>This allows you to have a field on your model object that tracks historical revisions of
@@ -46,6 +48,7 @@ import org.joda.time.DateTime;
 *
 * @see google.registry.model.EppResource
 */
@DeleteAfterMigration
public final class CommitLogRevisionsTranslatorFactory
    extends ImmutableSortedMapTranslatorFactory<DateTime, Key<CommitLogManifest>> {

@@ -19,6 +19,7 @@ import google.registry.config.CredentialModule;
import google.registry.config.RegistryConfig.ConfigModule;
import google.registry.keyring.kms.KmsModule;
import google.registry.persistence.PersistenceModule.AppEngineJpaTm;
import google.registry.persistence.PersistenceModule.ReadOnlyReplicaJpaTm;
import google.registry.persistence.transaction.JpaTransactionManager;
import google.registry.privileges.secretmanager.SecretManagerModule;
import google.registry.util.UtilsModule;
@@ -40,4 +41,7 @@ public interface PersistenceComponent {

  @AppEngineJpaTm
  JpaTransactionManager appEngineJpaTransactionManager();

  @ReadOnlyReplicaJpaTm
  JpaTransactionManager readOnlyReplicaJpaTransactionManager();
}
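
With the new binding exposed on the component, callers can obtain the replica-backed manager directly. A hedged sketch (construction of the component itself is elided):

    // Sketch: the accessor matches the interface method added above.
    JpaTransactionManager replicaTm = persistenceComponent.readOnlyReplicaJpaTransactionManager();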
@@ -20,6 +20,8 @@ import static google.registry.config.RegistryConfig.getHibernateHikariConnection
import static google.registry.config.RegistryConfig.getHibernateHikariIdleTimeout;
import static google.registry.config.RegistryConfig.getHibernateHikariMaximumPoolSize;
import static google.registry.config.RegistryConfig.getHibernateHikariMinimumIdle;
import static google.registry.config.RegistryConfig.getHibernateJdbcBatchSize;
import static google.registry.config.RegistryConfig.getHibernateJdbcFetchSize;
import static google.registry.config.RegistryConfig.getHibernateLogSqlQueries;
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;

@@ -76,15 +78,9 @@ public abstract class PersistenceModule {
  public static final String HIKARI_DS_CLOUD_SQL_INSTANCE =
      "hibernate.hikari.dataSource.cloudSqlInstance";

  /**
   * Postgresql-specific: driver default fetch size is 0, which disables streaming result sets. Here
   * we set a small default geared toward Nomulus server transactions. Large queries can override
   * the defaults using {@link JpaTransactionManager#setQueryFetchSize}.
   */
  public static final String JDBC_BATCH_SIZE = "hibernate.jdbc.batch_size";
  public static final String JDBC_FETCH_SIZE = "hibernate.jdbc.fetch_size";

  private static final int DEFAULT_SERVER_FETCH_SIZE = 20;

  @VisibleForTesting
  @Provides
  @DefaultHibernateConfigs
@@ -111,7 +107,8 @@ public abstract class PersistenceModule {
    properties.put(HIKARI_MAXIMUM_POOL_SIZE, getHibernateHikariMaximumPoolSize());
    properties.put(HIKARI_IDLE_TIMEOUT, getHibernateHikariIdleTimeout());
    properties.put(Environment.DIALECT, NomulusPostgreSQLDialect.class.getName());
    properties.put(JDBC_FETCH_SIZE, Integer.toString(DEFAULT_SERVER_FETCH_SIZE));
    properties.put(JDBC_BATCH_SIZE, getHibernateJdbcBatchSize());
    properties.put(JDBC_FETCH_SIZE, getHibernateJdbcFetchSize());
    return properties.build();
  }
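
Both values now come from RegistryConfig rather than a hardcoded constant. The keys are standard Hibernate settings; for reference, a sketch with literal values (20 matches the removed DEFAULT_SERVER_FETCH_SIZE, while the batch size of 50 is purely illustrative):

    // Illustrative only: the module populates these from RegistryConfig at runtime.
    properties.put("hibernate.jdbc.fetch_size", "20"); // non-zero enables streamed result sets on Postgres
    properties.put("hibernate.jdbc.batch_size", "50"); // groups JDBC writes into batches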
@@ -122,8 +119,11 @@ public abstract class PersistenceModule {
      @Config("cloudSqlJdbcUrl") String jdbcUrl,
      @Config("cloudSqlInstanceConnectionName") String instanceConnectionName,
      @DefaultHibernateConfigs ImmutableMap<String, String> defaultConfigs) {
    return createPartialSqlConfigs(
        jdbcUrl, instanceConnectionName, defaultConfigs, Optional.empty());
    HashMap<String, String> overrides = Maps.newHashMap(defaultConfigs);
    overrides.put(Environment.URL, jdbcUrl);
    overrides.put(HIKARI_DS_SOCKET_FACTORY, "com.google.cloud.sql.postgres.SocketFactory");
    overrides.put(HIKARI_DS_CLOUD_SQL_INSTANCE, instanceConnectionName);
    return ImmutableMap.copyOf(overrides);
  }

  /**
@@ -184,22 +184,6 @@ public abstract class PersistenceModule {
    return ImmutableMap.copyOf(overrides);
  }

  @VisibleForTesting
  static ImmutableMap<String, String> createPartialSqlConfigs(
      String jdbcUrl,
      String instanceConnectionName,
      ImmutableMap<String, String> defaultConfigs,
      Optional<Provider<TransactionIsolationLevel>> isolationOverride) {
    HashMap<String, String> overrides = Maps.newHashMap(defaultConfigs);
    overrides.put(Environment.URL, jdbcUrl);
    overrides.put(HIKARI_DS_SOCKET_FACTORY, "com.google.cloud.sql.postgres.SocketFactory");
    overrides.put(HIKARI_DS_CLOUD_SQL_INSTANCE, instanceConnectionName);
    isolationOverride
        .map(Provider::get)
        .ifPresent(override -> overrides.put(Environment.ISOLATION, override.name()));
    return ImmutableMap.copyOf(overrides);
  }

  /**
   * Provides a {@link Supplier} of single-use JDBC {@link Connection connections} that can manage
   * the database DDL schema.
@@ -280,6 +264,36 @@ public abstract class PersistenceModule {
    return new JpaTransactionManagerImpl(create(overrides), clock);
  }

  @Provides
  @Singleton
  @ReadOnlyReplicaJpaTm
  static JpaTransactionManager provideReadOnlyReplicaJpaTm(
      SqlCredentialStore credentialStore,
      @PartialCloudSqlConfigs ImmutableMap<String, String> cloudSqlConfigs,
      @Config("cloudSqlReplicaInstanceConnectionName")
          Optional<String> replicaInstanceConnectionName,
      Clock clock) {
    HashMap<String, String> overrides = Maps.newHashMap(cloudSqlConfigs);
    setSqlCredential(credentialStore, new RobotUser(RobotId.NOMULUS), overrides);
    replicaInstanceConnectionName.ifPresent(
        name -> overrides.put(HIKARI_DS_CLOUD_SQL_INSTANCE, name));
    return new JpaTransactionManagerImpl(create(overrides), clock);
  }

  @Provides
  @Singleton
  @BeamReadOnlyReplicaJpaTm
  static JpaTransactionManager provideBeamReadOnlyReplicaJpaTm(
      @BeamPipelineCloudSqlConfigs ImmutableMap<String, String> beamCloudSqlConfigs,
      @Config("cloudSqlReplicaInstanceConnectionName")
          Optional<String> replicaInstanceConnectionName,
      Clock clock) {
    HashMap<String, String> overrides = Maps.newHashMap(beamCloudSqlConfigs);
    replicaInstanceConnectionName.ifPresent(
        name -> overrides.put(HIKARI_DS_CLOUD_SQL_INSTANCE, name));
    return new JpaTransactionManagerImpl(create(overrides), clock);
  }

  /** Constructs the {@link EntityManagerFactory} instance. */
  @VisibleForTesting
  static EntityManagerFactory create(
@@ -357,7 +371,12 @@ public abstract class PersistenceModule {
     * The {@link JpaTransactionManager} optimized for bulk loading multi-level JPA entities. Please
     * see {@link google.registry.model.bulkquery.BulkQueryEntities} for more information.
     */
    BULK_QUERY
    BULK_QUERY,
    /**
     * The {@link JpaTransactionManager} that uses the read-only Postgres replica if configured, or
     * the standard DB if not.
     */
    READ_ONLY_REPLICA
  }

  /** Dagger qualifier for JDBC {@link Connection} with schema management privilege. */
@@ -383,6 +402,22 @@ public abstract class PersistenceModule {
  @Documented
  public @interface BeamBulkQueryJpaTm {}

  /**
   * Dagger qualifier for {@link JpaTransactionManager} used inside BEAM pipelines that uses the
   * read-only Postgres replica if one is configured (otherwise it uses the standard DB).
   */
  @Qualifier
  @Documented
  public @interface BeamReadOnlyReplicaJpaTm {}

  /**
   * Dagger qualifier for {@link JpaTransactionManager} that uses the read-only Postgres replica if
   * one is configured (otherwise it uses the standard DB).
   */
  @Qualifier
  @Documented
  public @interface ReadOnlyReplicaJpaTm {}

  /** Dagger qualifier for {@link JpaTransactionManager} used for Nomulus tool. */
  @Qualifier
  @Documented

@@ -144,7 +144,7 @@ public class VKey<T> extends ImmutableObject implements Serializable {
   */
  public static <T> VKey<T> create(String keyString) {
    if (!keyString.startsWith(CLASS_TYPE + KV_SEPARATOR)) {
      // to handle the existing ofy key string
      // handle the existing ofy key string
      return fromWebsafeKey(keyString);
    } else {
      ImmutableMap<String, String> kvs =
@@ -307,6 +307,7 @@ public class VKey<T> extends ImmutableObject implements Serializable {
    if (maybeGetSqlKey().isPresent()) {
      key += DELIMITER + SQL_LOOKUP_KEY + KV_SEPARATOR + SerializeUtils.stringify(getSqlKey());
    }
    // getString() returns a Base64-encoded websafe representation of the ofy key
    if (maybeGetOfyKey().isPresent()) {
      key += DELIMITER + OFY_LOOKUP_KEY + KV_SEPARATOR + getOfyKey().getString();
    }
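
Taken together, stringify() emits a composite key string — the class token, then optional sql and ofy components joined by DELIMITER and KV_SEPARATOR — and create(String) parses it back, while still accepting bare legacy websafe ofy keys. A hedged round-trip sketch (the literal separators in the comment are assumptions about the constants' values):

    // Sketch: a stringified key might look like "kind:DomainBase@sql:4-TLD-123@ofy:agR..."
    // (all tokens illustrative); create() recovers an equal VKey from it.
    VKey<DomainBase> key = domain.createVKey();
    VKey<DomainBase> parsed = VKey.create(key.stringify());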
@@ -15,6 +15,7 @@
package google.registry.persistence.converter;

import com.google.common.collect.Maps;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.common.DatabaseMigrationStateSchedule;
import google.registry.model.common.DatabaseMigrationStateSchedule.MigrationState;
import google.registry.model.common.DatabaseMigrationStateSchedule.MigrationStateTransition;
@@ -23,6 +24,7 @@ import javax.persistence.Converter;
import org.joda.time.DateTime;

/** JPA converter for {@link DatabaseMigrationStateSchedule} transitions. */
@DeleteAfterMigration
@Converter(autoApply = true)
public class DatabaseMigrationScheduleTransitionConverter
    extends TimedTransitionPropertyConverterBase<MigrationState, MigrationStateTransition> {

@@ -141,14 +141,15 @@ public class JpaTransactionManagerImpl implements JpaTransactionManager {
    // Postgresql-specific: 'set transaction' command must be called inside a transaction
    assertInTransaction();

    EntityManager entityManager = getEntityManager();
    ReadOnlyCheckingEntityManager entityManager =
        (ReadOnlyCheckingEntityManager) getEntityManager();
    // Isolation is hardcoded to REPEATABLE READ, as specified by parent's Javadoc.
    entityManager
        .createNativeQuery("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ")
        .executeUpdate();
        .executeUpdateIgnoringReadOnly();
    entityManager
        .createNativeQuery(String.format("SET TRANSACTION SNAPSHOT '%s'", snapshotId))
        .executeUpdate();
        .executeUpdateIgnoringReadOnly();
    return this;
  }
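
The snapshot id consumed here must have been exported by another session whose transaction is still open. A minimal sketch of the underlying PostgreSQL handshake through the same native-query API (the id value is supplied at runtime; nothing here is Nomulus-specific):

    // Exporting session (sketch): publish a snapshot id while its transaction stays open.
    String snapshotId =
        (String) entityManager.createNativeQuery("SELECT pg_export_snapshot()").getSingleResult();

    // Adopting session (sketch): must run inside a transaction, as the code above asserts.
    entityManager
        .createNativeQuery(String.format("SET TRANSACTION SNAPSHOT '%s'", snapshotId))
        .executeUpdate();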
@@ -16,6 +16,7 @@ package google.registry.persistence.transaction;

import static google.registry.persistence.transaction.TransactionManagerFactory.assertNotReadOnlyMode;

import google.registry.model.annotations.DeleteAfterMigration;
import java.util.List;
import java.util.Map;
import javax.persistence.EntityGraph;
@@ -34,6 +35,7 @@ import javax.persistence.criteria.CriteriaUpdate;
import javax.persistence.metamodel.Metamodel;

/** An {@link EntityManager} that throws exceptions on write actions if in read-only mode. */
@DeleteAfterMigration
public class ReadOnlyCheckingEntityManager implements EntityManager {

  private final EntityManager delegate;
@@ -204,7 +206,7 @@ public class ReadOnlyCheckingEntityManager {
  }

  @Override
  public Query createNativeQuery(String sqlString) {
  public ReadOnlyCheckingQuery createNativeQuery(String sqlString) {
    return new ReadOnlyCheckingQuery(delegate.createNativeQuery(sqlString));
  }
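
Narrowing the override's return type to ReadOnlyCheckingQuery (a covariant return, legal in Java) is what lets the transaction-manager code above call executeUpdateIgnoringReadOnly(), which is not a JPA Query method. A plausible shape for it, assuming it exists on ReadOnlyCheckingQuery to let session-level commands such as SET TRANSACTION run even in read-only mode:

    // Assumption: bypasses the assertNotReadOnlyMode() check that the regular
    // executeUpdate() override performs before delegating.
    int executeUpdateIgnoringReadOnly() {
      return delegate.executeUpdate();
    }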
@@ -16,6 +16,7 @@ package google.registry.persistence.transaction;

import static google.registry.persistence.transaction.TransactionManagerFactory.assertNotReadOnlyMode;

import google.registry.model.annotations.DeleteAfterMigration;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
@@ -29,6 +30,7 @@ import javax.persistence.Query;
import javax.persistence.TemporalType;

/** A {@link Query} that throws exceptions on write actions if in read-only mode. */
@DeleteAfterMigration
class ReadOnlyCheckingQuery implements Query {

  private final Query delegate;

@@ -16,6 +16,7 @@ package google.registry.persistence.transaction;

import static google.registry.persistence.transaction.TransactionManagerFactory.assertNotReadOnlyMode;

import google.registry.model.annotations.DeleteAfterMigration;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
@@ -29,6 +30,7 @@ import javax.persistence.TemporalType;
import javax.persistence.TypedQuery;

/** A {@link TypedQuery<T>} that throws exceptions on write actions if in read-only mode. */
@DeleteAfterMigration
class ReadOnlyCheckingTypedQuery<T> implements TypedQuery<T> {

  private final TypedQuery<T> delegate;

@@ -14,7 +14,6 @@

package google.registry.rde;

import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static google.registry.request.RequestParameters.extractBooleanParameter;
import static google.registry.request.RequestParameters.extractOptionalIntParameter;
import static google.registry.request.RequestParameters.extractOptionalParameter;
@@ -22,7 +21,6 @@ import static google.registry.request.RequestParameters.extractRequiredDatetimeP
import static google.registry.request.RequestParameters.extractSetOfDatetimeParameters;
import static google.registry.request.RequestParameters.extractSetOfParameters;

import com.google.appengine.api.taskqueue.Queue;
import com.google.common.collect.ImmutableSet;
import com.jcraft.jsch.SftpProgressMonitor;
import dagger.Binds;
@@ -30,7 +28,6 @@ import dagger.Module;
import dagger.Provides;
import google.registry.request.Parameter;
import java.util.Optional;
import javax.inject.Named;
import javax.servlet.http.HttpServletRequest;
import org.joda.time.DateTime;

@@ -110,12 +107,6 @@ public abstract class RdeModule {
    return extractOptionalParameter(req, PARAM_PREFIX);
  }

  @Provides
  @Named("rde-report")
  static Queue provideQueueRdeReport() {
    return getQueue("rde-report");
  }

  @Binds
  abstract SftpProgressMonitor provideSftpProgressMonitor(
      LoggingSftpProgressMonitor loggingSftpProgressMonitor);

@@ -56,6 +56,7 @@ import google.registry.model.host.HostResource;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.rde.RdeMode;
import google.registry.model.registrar.Registrar;
import google.registry.persistence.PersistenceModule.JpaTransactionManagerType;
import google.registry.request.Action;
import google.registry.request.HttpException.BadRequestException;
import google.registry.request.Parameter;
@@ -340,6 +341,9 @@ public final class RdeStagingAction implements Runnable {
                .encode(stagingKeyBytes))
        .put("registryEnvironment", RegistryEnvironment.get().name())
        .put("workerMachineType", machineType)
        .put(
            "jpaTransactionManagerType",
            JpaTransactionManagerType.READ_ONLY_REPLICA.toString())
        // TODO (jianglai): Investigate turning off public IPs (for which
        // there is a quota) in order to increase the total number of
        // workers allowed (also under quota).
@@ -17,6 +17,7 @@ package google.registry.tools;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.collect.Sets.SetView;
import google.registry.model.annotations.DeleteAfterMigration;
import java.io.File;

/**
@@ -27,6 +28,7 @@ import java.io.File;
 * two-level directory hierarchy with data files in level-db format (output-*) and Datastore
 * metadata files (*.export_metadata).
 */
@DeleteAfterMigration
class CompareDbBackups {
  private static final String DS_V3_BACKUP_FILE_PREFIX = "output-";

@@ -14,7 +14,6 @@

package google.registry.tools;

import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static google.registry.model.tld.Registries.assertTldsExist;
import static google.registry.rde.RdeModule.PARAM_BEAM;
import static google.registry.rde.RdeModule.PARAM_DIRECTORY;
@@ -23,23 +22,22 @@ import static google.registry.rde.RdeModule.PARAM_MANUAL;
import static google.registry.rde.RdeModule.PARAM_MODE;
import static google.registry.rde.RdeModule.PARAM_REVISION;
import static google.registry.rde.RdeModule.PARAM_WATERMARKS;
import static google.registry.rde.RdeModule.RDE_REPORT_QUEUE;
import static google.registry.request.RequestParameters.PARAM_TLDS;

import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
import com.beust.jcommander.Parameters;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMultimap;
import google.registry.model.rde.RdeMode;
import google.registry.rde.RdeStagingAction;
import google.registry.request.Action.Service;
import google.registry.tools.params.DateTimeParameter;
import google.registry.util.AppEngineServiceUtils;
import google.registry.util.CloudTasksUtils;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Named;
import org.joda.time.DateTime;

/**
@@ -94,15 +92,7 @@ final class GenerateEscrowDepositCommand implements CommandWithRemoteApi {

  @Inject AppEngineServiceUtils appEngineServiceUtils;

  @Inject
  @Named("rde-report")
  Queue queue;

  // ETA is a required property for TaskOptions but we let the service to set it when submitting the
  // task to the task queue. However, the local test service doesn't do that for us during the unit
  // test, so we add this field here to let the unit test be able to inject the ETA to pass the
  // test.
  @VisibleForTesting Optional<Long> maybeEtaMillis = Optional.empty();
  @Inject CloudTasksUtils cloudTasksUtils;

  @Override
  public void run() {
@@ -126,27 +116,25 @@ final class GenerateEscrowDepositCommand implements CommandWithRemoteApi {
      throw new ParameterException("Output subdirectory must not be empty");
    }

    // Unlike many tool commands, this command is actually invoking an action on the backend module
    // (because it's a mapreduce). So we invoke it in a different way.
    String hostname = appEngineServiceUtils.getCurrentVersionHostname("backend");
    TaskOptions opts =
        withUrl(RdeStagingAction.PATH)
            .header("Host", hostname)
            .param(PARAM_MANUAL, String.valueOf(true))
            .param(PARAM_MODE, mode.toString())
            .param(PARAM_DIRECTORY, outdir)
            .param(PARAM_LENIENT, Boolean.toString(lenient))
            .param(PARAM_BEAM, Boolean.toString(beam))
            .param(PARAM_TLDS, tlds.stream().collect(Collectors.joining(",")))
            .param(
    ImmutableMultimap.Builder<String, String> paramsBuilder =
        new ImmutableMultimap.Builder<String, String>()
            .put(PARAM_MANUAL, String.valueOf(true))
            .put(PARAM_MODE, mode.toString())
            .put(PARAM_DIRECTORY, outdir)
            .put(PARAM_LENIENT, Boolean.toString(lenient))
            .put(PARAM_BEAM, Boolean.toString(beam))
            .put(PARAM_TLDS, tlds.stream().collect(Collectors.joining(",")))
            .put(
                PARAM_WATERMARKS,
                watermarks.stream().map(DateTime::toString).collect(Collectors.joining(",")));

    if (revision != null) {
      opts = opts.param(PARAM_REVISION, String.valueOf(revision));
      paramsBuilder.put(PARAM_REVISION, String.valueOf(revision));
    }
    if (maybeEtaMillis.isPresent()) {
      opts = opts.etaMillis(maybeEtaMillis.get());
    }
    queue.add(opts);
    cloudTasksUtils.enqueue(
        RDE_REPORT_QUEUE,
        CloudTasksUtils.createPostTask(
            RdeStagingAction.PATH, Service.BACKEND.toString(), paramsBuilder.build()));
  }

}
@@ -15,12 +15,14 @@
package google.registry.tools;

import com.beust.jcommander.Parameters;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.common.DatabaseMigrationStateSchedule;
import google.registry.model.common.DatabaseMigrationStateSchedule.MigrationState;
import google.registry.model.common.DatabaseMigrationStateSchedule.MigrationStateTransition;
import google.registry.model.common.TimedTransitionProperty;

/** A command to check the current Registry 3.0 migration state of the database. */
@DeleteAfterMigration
@Parameters(separators = " =", commandDescription = "Check current Registry 3.0 migration state")
public class GetDatabaseMigrationStateCommand implements CommandWithRemoteApi {

@@ -55,7 +55,7 @@ abstract class GetEppResourceCommand implements CommandWithRemoteApi {
            ? String.format(
                "%s\n\nWebsafe key: %s",
                expand ? resource.get().toHydratedString() : resource.get(),
                resource.get().createVKey().getOfyKey().getString())
                resource.get().createVKey().stringify())
            : String.format("%s '%s' does not exist or is deleted\n", resourceType, uniqueId));
  }

@@ -16,7 +16,6 @@ package google.registry.tools;

import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.google.common.collect.Streams;
import google.registry.model.tld.label.PremiumList;
import google.registry.model.tld.label.PremiumList.PremiumEntry;
import google.registry.model.tld.label.PremiumListDao;
@@ -40,7 +39,7 @@ public class GetPremiumListCommand implements CommandWithRemoteApi {
    System.out.printf(
        "%s:\n%s\n",
        premiumListName,
        Streams.stream(PremiumListDao.loadAllPremiumEntries(premiumListName))
        PremiumListDao.loadAllPremiumEntries(premiumListName).stream()
            .sorted(Comparator.comparing(PremiumEntry::getDomainLabel))
            .map(premiumEntry -> premiumEntry.toString(premiumList.get().getCurrency()))
            .collect(Collectors.joining("\n")));

@@ -20,12 +20,12 @@ import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import google.registry.model.EppResource;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.persistence.VKey;
import java.util.List;

/**
 * Command to get info on a Datastore resource by websafe key.
 */
/** Command to get info on a Datastore resource by websafe key. */
@DeleteAfterMigration
@Parameters(separators = " =", commandDescription = "Fetch a Datastore resource by websafe key")
final class GetResourceByKeyCommand implements CommandWithRemoteApi {

@@ -19,11 +19,13 @@ import static java.nio.charset.StandardCharsets.UTF_8;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import google.registry.model.SchemaVersion;
import google.registry.model.annotations.DeleteAfterMigration;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

/** Generates the schema file used for model versioning. */
@DeleteAfterMigration
@Parameters(commandDescription = "Generate a model schema file")
final class GetSchemaCommand implements Command {

@@ -29,6 +29,7 @@ import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.EntitySubclass;
import com.googlecode.objectify.annotation.Parent;
import google.registry.model.BackupGroupRoot;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.annotations.NotBackedUp;
import google.registry.model.annotations.VirtualEntity;
import java.io.Serializable;
@@ -40,6 +41,7 @@ import java.util.Map;
import java.util.Set;

/** Visualizes the schema parentage tree. */
@DeleteAfterMigration
@Parameters(commandDescription = "Generate a model schema file")
final class GetSchemaTreeCommand implements Command {

@@ -22,6 +22,7 @@ import com.google.common.base.Ascii;
import com.google.common.collect.ImmutableList;
import google.registry.export.datastore.DatastoreAdmin;
import google.registry.export.datastore.Operation;
import google.registry.model.annotations.DeleteAfterMigration;
import java.util.List;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
@@ -35,6 +36,7 @@ import org.joda.time.Duration;
 * href="http://playbooks/domain_registry/procedures/backup-restore-testing.md">the playbook</a> for
 * the entire process.
 */
@DeleteAfterMigration
@Parameters(separators = " =", commandDescription = "Imports a backup of the Datastore.")
public class ImportDatastoreCommand extends ConfirmingCommand {

@@ -20,6 +20,7 @@ import com.google.api.client.json.JsonFactory;
import com.google.api.client.json.jackson2.JacksonFactory;
import google.registry.export.datastore.DatastoreAdmin;
import google.registry.export.datastore.DatastoreAdmin.ListOperations;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.util.Clock;
import java.util.Optional;
import javax.annotation.Nullable;
@@ -28,6 +29,7 @@ import org.joda.time.DateTime;
import org.joda.time.Duration;

/** Command that lists Datastore operations. */
@DeleteAfterMigration
@Parameters(separators = " =", commandDescription = "List Datastore operations.")
public class ListDatastoreOperationsCommand implements Command {

@@ -26,12 +26,14 @@ import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import google.registry.bigquery.BigqueryUtils.SourceFormat;
import google.registry.export.AnnotatedEntities;
import google.registry.model.annotations.DeleteAfterMigration;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/** Command to load Datastore snapshots into Bigquery. */
@DeleteAfterMigration
@Parameters(separators = " =", commandDescription = "Load Datastore snapshot into Bigquery")
final class LoadSnapshotCommand extends BigqueryCommand {

@@ -25,6 +25,7 @@ import com.google.common.io.CharStreams;
import com.google.common.io.Files;
import com.googlecode.objectify.Key;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.domain.DomainBase;
import google.registry.persistence.VKey;
import google.registry.util.NonFinalForTesting;
@@ -40,6 +41,7 @@ import java.util.List;
 * <p>The key path is the value of column __key__.path of the entity's BigQuery table. Its value is
 * converted from the entity's key.
 */
@DeleteAfterMigration
abstract class ReadEntityFromKeyPathCommand<T> extends MutatingCommand {

  @Parameter(

@@ -17,6 +17,7 @@ package google.registry.tools;
import com.google.common.collect.ImmutableMap;
import google.registry.tools.javascrap.BackfillRegistryLocksCommand;
import google.registry.tools.javascrap.BackfillSpec11ThreatMatchesCommand;
import google.registry.tools.javascrap.CompareEscrowDepositsCommand;
import google.registry.tools.javascrap.DeleteContactByRoidCommand;
import google.registry.tools.javascrap.HardDeleteHostCommand;
import google.registry.tools.javascrap.PopulateNullRegistrarFieldsCommand;
@@ -40,6 +41,7 @@ public final class RegistryTool {
          .put("canonicalize_labels", CanonicalizeLabelsCommand.class)
          .put("check_domain", CheckDomainCommand.class)
          .put("check_domain_claims", CheckDomainClaimsCommand.class)
          .put("compare_escrow_deposits", CompareEscrowDepositsCommand.class)
          .put("convert_idn", ConvertIdnCommand.class)
          .put("count_domains", CountDomainsCommand.class)
          .put("create_anchor_tenant", CreateAnchorTenantCommand.class)

@@ -19,6 +19,7 @@ import dagger.Component;
import dagger.Lazy;
import google.registry.batch.BatchModule;
import google.registry.bigquery.BigqueryModule;
import google.registry.config.CloudTasksUtilsModule;
import google.registry.config.CredentialModule.LocalCredentialJson;
import google.registry.config.RegistryConfig.Config;
import google.registry.config.RegistryConfig.ConfigModule;
@@ -42,6 +43,7 @@ import google.registry.request.Modules.UrlFetchTransportModule;
import google.registry.request.Modules.UserServiceModule;
import google.registry.tools.AuthModule.LocalCredentialModule;
import google.registry.tools.javascrap.BackfillRegistryLocksCommand;
import google.registry.tools.javascrap.CompareEscrowDepositsCommand;
import google.registry.tools.javascrap.DeleteContactByRoidCommand;
import google.registry.tools.javascrap.HardDeleteHostCommand;
import google.registry.util.UtilsModule;
@@ -64,6 +66,7 @@ import javax.inject.Singleton;
      BigqueryModule.class,
      ConfigModule.class,
      CloudDnsWriterModule.class,
      CloudTasksUtilsModule.class,
      DatastoreAdminModule.class,
      DatastoreServiceModule.class,
      DummyKeyringModule.class,
@@ -93,6 +96,8 @@ interface RegistryToolComponent {

  void inject(CheckDomainCommand command);

  void inject(CompareEscrowDepositsCommand command);

  void inject(CountDomainsCommand command);

  void inject(CreateAnchorTenantCommand command);
@@ -19,12 +19,14 @@ import static google.registry.persistence.transaction.TransactionManagerFactory.
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.google.common.collect.ImmutableSortedMap;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.common.DatabaseMigrationStateSchedule;
import google.registry.model.common.DatabaseMigrationStateSchedule.MigrationState;
import google.registry.tools.params.TransitionListParameter.MigrationStateTransitions;
import org.joda.time.DateTime;

/** Command to set the Registry 3.0 database migration state schedule. */
@DeleteAfterMigration
@Parameters(
    separators = " =",
    commandDescription = "Set the current database migration state schedule.")

@@ -15,21 +15,15 @@
package google.registry.tools;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static google.registry.model.tld.label.PremiumListUtils.parseToPremiumList;
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
import static google.registry.util.ListNamingUtils.convertFilePathToName;
import static java.nio.charset.StandardCharsets.UTF_8;

import com.beust.jcommander.Parameters;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Streams;
import google.registry.model.tld.label.PremiumList;
import google.registry.model.tld.label.PremiumList.PremiumEntry;
import google.registry.model.tld.label.PremiumListDao;
import google.registry.model.tld.label.PremiumListUtils;
import java.nio.file.Files;
import java.util.List;
import java.util.Optional;

/** Command to safely update {@link PremiumList} in Database for a given TLD. */
@@ -43,46 +37,12 @@ class UpdatePremiumListCommand extends CreateOrUpdatePremiumListCommand {
    checkArgument(
        list.isPresent(),
        String.format("Could not update premium list %s because it doesn't exist.", name));
    List<String> existingEntry = getExistingPremiumEntry(list.get()).asList();
    inputData = Files.readAllLines(inputFile, UTF_8);
    checkArgument(!inputData.isEmpty(), "New premium list data cannot be empty");
    currency = list.get().getCurrency();
    // reconstructing existing premium list to bypass Hibernate lazy initialization exception
    PremiumList existingPremiumList = parseToPremiumList(name, currency, existingEntry);
    PremiumList updatedPremiumList = parseToPremiumList(name, currency, inputData);

    PremiumList updatedPremiumList = PremiumListUtils.parseToPremiumList(name, currency, inputData);
    return String.format(
        "Update premium list for %s?\n Old List: %s\n New List: %s",
        name, existingPremiumList, updatedPremiumList);
  }

  /*
  To get premium list content as a set of string. This is a workaround to avoid dealing with
  Hibernate.LazyInitizationException error. It occurs when trying to access data of the
  latest revision of an existing premium list.
  "Cannot evaluate google.registry.model.tld.label.PremiumList.toString()'".
  Ideally, the following should be the way to verify info in latest revision of a premium list:

  PremiumList existingPremiumList =
      PremiumListSqlDao.getLatestRevision(name)
          .orElseThrow(
              () ->
                  new IllegalArgumentException(
                      String.format(
                          "Could not update premium list %s because it doesn't exist.", name)));
  assertThat(persistedList.getLabelsToPrices()).containsEntry("foo", new BigDecimal("9000.00"));
  assertThat(persistedList.size()).isEqualTo(1);
  */
  protected ImmutableSet<String> getExistingPremiumEntry(PremiumList list) {

    Iterable<PremiumEntry> sqlListEntries =
        jpaTm().transact(() -> PremiumListDao.loadPremiumEntries(list));
    return Streams.stream(sqlListEntries)
        .map(
            premiumEntry ->
                String.format(
                    "%s,%s %s",
                    premiumEntry.getDomainLabel(), list.getCurrency(), premiumEntry.getValue()))
        .collect(toImmutableSet());
        name, list, updatedPremiumList);
  }
}
@@ -0,0 +1,130 @@
// Copyright 2022 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package google.registry.tools.javascrap;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.Sets.difference;

import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import google.registry.keyring.api.Keyring;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.rde.Ghostryde;
import google.registry.tools.Command;
import google.registry.tools.params.PathParameter;
import google.registry.xjc.XjcXmlTransformer;
import google.registry.xjc.rde.XjcRdeDeposit;
import google.registry.xjc.rdedomain.XjcRdeDomain;
import google.registry.xjc.rderegistrar.XjcRdeRegistrar;
import google.registry.xml.XmlException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.xml.bind.JAXBElement;

/**
 * Command to compare two XML RDE escrow deposits.
 *
 * <p>Note that this command only makes sure that both deposits contain the same registrars and
 * domains, regardless of the order. To verify that they are indeed equivalent one still needs to
 * verify internal consistency within each deposit (i.e. to check that all hosts and contacts
 * referenced by domains are included in the deposit) by calling {@code
 * google.registry.tools.ValidateEscrowDepositCommand}.
 */
@DeleteAfterMigration
@Parameters(separators = " =", commandDescription = "Compare two XML escrow deposits.")
public final class CompareEscrowDepositsCommand implements Command {

  @Parameter(
      description =
          "Two XML escrow deposit files. Each may be a plain XML or an XML GhostRyDE file.",
      validateWith = PathParameter.InputFile.class)
  private List<Path> inputs;

  @Inject Provider<Keyring> keyring;

  private XjcRdeDeposit getDeposit(Path input) throws IOException, XmlException {
    InputStream fileStream = Files.newInputStream(input);
    InputStream inputStream = fileStream;
    if (input.toString().endsWith(".ghostryde")) {
      inputStream = Ghostryde.decoder(fileStream, keyring.get().getRdeStagingDecryptionKey());
    }
    return XjcXmlTransformer.unmarshal(XjcRdeDeposit.class, inputStream);
  }

  @Override
  public void run() throws Exception {
    checkArgument(
        inputs.size() == 2,
        "Must supply 2 files to compare, but %s was/were supplied.",
        inputs.size());
    XjcRdeDeposit deposit1 = getDeposit(inputs.get(0));
    XjcRdeDeposit deposit2 = getDeposit(inputs.get(1));
    compareXmlDeposits(deposit1, deposit2);
  }

  private static void process(XjcRdeDeposit deposit, Set<String> domains, Set<String> registrars) {
    for (JAXBElement<?> item : deposit.getContents().getContents()) {
      if (XjcRdeDomain.class.isAssignableFrom(item.getDeclaredType())) {
        XjcRdeDomain domain = (XjcRdeDomain) item.getValue();
        domains.add(checkNotNull(domain.getName()));
      } else if (XjcRdeRegistrar.class.isAssignableFrom(item.getDeclaredType())) {
        XjcRdeRegistrar registrar = (XjcRdeRegistrar) item.getValue();
        registrars.add(checkNotNull(registrar.getId()));
      }
    }
  }

  private static boolean printUniqueElements(
      Set<String> set1, Set<String> set2, String element, String deposit) {
    ImmutableList<String> uniqueElements = ImmutableList.copyOf(difference(set1, set2));
    if (!uniqueElements.isEmpty()) {
      System.out.printf(
          "%s only in %s:\n%s\n", element, deposit, Joiner.on("\n").join(uniqueElements));
      return false;
    }
    return true;
  }

  private static void compareXmlDeposits(XjcRdeDeposit deposit1, XjcRdeDeposit deposit2) {
    Set<String> domains1 = new HashSet<>();
    Set<String> domains2 = new HashSet<>();
    Set<String> registrars1 = new HashSet<>();
    Set<String> registrars2 = new HashSet<>();
    process(deposit1, domains1, registrars1);
    process(deposit2, domains2, registrars2);
    boolean good = true;
    good &= printUniqueElements(domains1, domains2, "domains", "deposit1");
    good &= printUniqueElements(domains2, domains1, "domains", "deposit2");
    good &= printUniqueElements(registrars1, registrars2, "registrars", "deposit1");
    good &= printUniqueElements(registrars2, registrars1, "registrars", "deposit2");
    if (good) {
      System.out.println(
          "The two deposits contain the same domains and registrars. "
              + "You still need to run validate_escrow_deposit to check reference consistency.");
    } else {
      System.out.println("The two deposits differ.");
    }
  }
}
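
A hypothetical invocation through the registry tool (the environment flag and file names are placeholders; GhostRyDE inputs are detected by the .ghostryde extension, as the code above shows):

    nomulus -e sandbox compare_escrow_deposits deposit-2022-01-01.xml deposit-2022-01-02.xml.ghostryde

When the domain and registrar sets match, the command still directs you to validate_escrow_deposit for the reference-consistency check.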
|
||||
@@ -11,6 +11,12 @@
|
||||
"^PRODUCTION|SANDBOX|CRASH|QA|ALPHA$"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "jpaTransactionManagerType",
|
||||
"label": "The type of JPA transaction manager to use",
|
||||
"helpText": "The standard SQL instance or a read-only replica may be used",
|
||||
"regexes": ["^REGULAR|READ_ONLY_REPLICA$"]
|
||||
},
|
||||
{
|
||||
"name": "pendings",
|
||||
"label": "The pendings deposits to generate.",
|
||||
|

@@ -103,7 +103,7 @@ public class AsyncTaskEnqueuerTest {
            .method("POST")
            .header("Host", "backend.hostname.fake")
            .header("content-type", "application/x-www-form-urlencoded")
            .param(PARAM_RESOURCE_KEY, contact.createVKey().getOfyKey().getString())
            .param(PARAM_RESOURCE_KEY, contact.createVKey().stringify())
            .param(PARAM_REQUESTED_TIME, clock.nowUtc().toString())
            .etaDelta(
                standardDays(5).minus(standardSeconds(30)),
@@ -125,7 +125,7 @@ public class AsyncTaskEnqueuerTest {
            .method("POST")
            .header("Host", "backend.hostname.fake")
            .header("content-type", "application/x-www-form-urlencoded")
            .param(PARAM_RESOURCE_KEY, contact.createVKey().getOfyKey().getString())
            .param(PARAM_RESOURCE_KEY, contact.createVKey().stringify())
            .param(PARAM_REQUESTED_TIME, now.toString())
            .param(PARAM_RESAVE_TIMES, "2015-05-20T14:34:56.000Z,2015-05-21T15:34:56.000Z")
            .etaDelta(

@@ -18,6 +18,7 @@ import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.common.collect.MoreCollectors.onlyElement;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth8.assertThat;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_RESOURCE_KEY;
import static google.registry.batch.AsyncTaskEnqueuer.QUEUE_ASYNC_DELETE;
import static google.registry.batch.AsyncTaskMetrics.OperationResult.STALE;
import static google.registry.model.EppResourceUtils.loadByForeignKey;
@@ -496,7 +497,7 @@ public class DeleteContactsAndHostsActionTest
        QUEUE_ASYNC_DELETE,
        new TaskMatcher()
            .etaDelta(standardHours(23), standardHours(25))
            .param("resourceKey", contactNotSaved.createVKey().getOfyKey().getString())
            .param(PARAM_RESOURCE_KEY, contactNotSaved.createVKey().stringify())
            .param("requestingClientId", "TheRegistrar")
            .param("clientTransactionId", "fakeClientTrid")
            .param("serverTransactionId", "fakeServerTrid")
@@ -504,7 +505,7 @@ public class DeleteContactsAndHostsActionTest
            .param("requestedTime", timeBeforeRun.toString()),
        new TaskMatcher()
            .etaDelta(standardHours(23), standardHours(25))
            .param("resourceKey", hostNotSaved.createVKey().getOfyKey().getString())
            .param(PARAM_RESOURCE_KEY, hostNotSaved.createVKey().stringify())
            .param("requestingClientId", "TheRegistrar")
            .param("clientTransactionId", "fakeClientTrid")
            .param("serverTransactionId", "fakeServerTrid")

@@ -145,7 +145,7 @@ public class RefreshDnsOnHostRenameActionTest
    assertTasksEnqueued(
        QUEUE_ASYNC_HOST_RENAME,
        new TaskMatcher()
            .param(PARAM_HOST_KEY, host.createVKey().getOfyKey().getString())
            .param(PARAM_HOST_KEY, host.createVKey().stringify())
            .param(PARAM_REQUESTED_TIME, timeEnqueued.toString()));
    verify(action.asyncTaskMetrics).recordDnsRefreshBatchSize(1L);
    verifyNoMoreInteractions(action.asyncTaskMetrics);
@@ -237,7 +237,7 @@ public class RefreshDnsOnHostRenameActionTest
        QUEUE_ASYNC_HOST_RENAME,
        new TaskMatcher()
            .etaDelta(standardHours(23), standardHours(25))
            .param("hostKey", host.createVKey().getOfyKey().getString()));
            .param(PARAM_HOST_KEY, host.createVKey().stringify()));
    assertThat(acquireLock()).isPresent();
  }

@@ -150,7 +150,7 @@ public class ResaveEntityActionTest {
            .method("POST")
            .header("Host", "backend.hostname.fake")
            .header("content-type", "application/x-www-form-urlencoded")
            .param(PARAM_RESOURCE_KEY, resavedDomain.createVKey().getOfyKey().getString())
            .param(PARAM_RESOURCE_KEY, resavedDomain.createVKey().stringify())
            .param(PARAM_REQUESTED_TIME, requestedTime.toString())
            .etaDelta(
                standardDays(5).minus(standardSeconds(30)),

@@ -28,6 +28,7 @@ import google.registry.testing.FakeClock;
import google.registry.testing.InjectExtension;
import java.io.Serializable;
import java.nio.file.Path;
import java.util.Optional;
import org.apache.beam.sdk.coders.SerializableCoder;
import org.apache.beam.sdk.testing.PAssert;
import org.apache.beam.sdk.values.PCollectionTuple;
@@ -83,7 +84,8 @@ class DatastoreSnapshotsTest {
            setupHelper.commitLogDir.getAbsolutePath(),
            START_TIME,
            fakeClock.nowUtc().plusMillis(1),
            ImmutableSet.copyOf(DatastoreSetupHelper.ALL_KINDS));
            ImmutableSet.copyOf(DatastoreSetupHelper.ALL_KINDS),
            Optional.empty());
    PAssert.that(tuple.get(ValidateSqlUtils.createSqlEntityTupleTag(Registrar.class)))
        .containsInAnyOrder(setupHelper.registrar1, setupHelper.registrar2);
    PAssert.that(tuple.get(ValidateSqlUtils.createSqlEntityTupleTag(DomainHistory.class)))

@@ -88,6 +88,7 @@ class SqlSnapshotsTest {
            DomainHistory.class,
            ContactResource.class,
            HostResource.class),
        Optional.empty(),
        Optional.empty());
    PAssert.that(sqlSnapshot.get(createSqlEntityTupleTag(Registry.class)))
        .containsInAnyOrder(setupHelper.registry);

@@ -98,11 +98,7 @@ class TldFanoutActionTest {
  }

  private void assertTaskWithoutTld() {
    cloudTasksHelper.assertTasksEnqueued(
        QUEUE,
        new TaskMatcher()
            .url(ENDPOINT)
            .header("content-type", "application/x-www-form-urlencoded"));
    cloudTasksHelper.assertTasksEnqueued(QUEUE, new TaskMatcher().url(ENDPOINT));
  }

  @Test

@@ -16,6 +16,7 @@ package google.registry.flows;

import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.truth.Truth.assertThat;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_RESOURCE_KEY;
import static google.registry.model.EppResourceUtils.loadByForeignKey;
import static google.registry.model.ImmutableObjectSubject.assertAboutImmutableObjects;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
@@ -146,7 +147,7 @@ public abstract class ResourceFlowTestCase<F extends Flow, R extends EppResource
    TaskMatcher expected =
        new TaskMatcher()
            .etaDelta(Duration.standardSeconds(75), Duration.standardSeconds(105)) // expected: 90
            .param("resourceKey", resource.createVKey().getOfyKey().getString())
            .param(PARAM_RESOURCE_KEY, resource.createVKey().stringify())
            .param("requestingClientId", requestingClientId)
            .param("serverTransactionId", trid.getServerTransactionId())
            .param("isSuperuser", Boolean.toString(isSuperuser))

@@ -321,7 +321,7 @@ class DomainDeleteFlowTest extends ResourceFlowTestCase<DomainDeleteFlow, Domain
            .method("POST")
            .header("Host", "backend.hostname.fake")
            .header("content-type", "application/x-www-form-urlencoded")
            .param(PARAM_RESOURCE_KEY, domain.createVKey().getOfyKey().getString())
            .param(PARAM_RESOURCE_KEY, domain.createVKey().stringify())
            .param(PARAM_REQUESTED_TIME, clock.nowUtc().toString())
            .param(PARAM_RESAVE_TIMES, clock.nowUtc().plusDays(5).toString())
            .etaDelta(when.minus(standardSeconds(30)), when.plus(standardSeconds(30))));

@@ -521,7 +521,7 @@ class DomainTransferRequestFlowTest
            .method("POST")
            .header("Host", "backend.hostname.fake")
            .header("content-type", "application/x-www-form-urlencoded")
            .param(PARAM_RESOURCE_KEY, domain.createVKey().getOfyKey().getString())
            .param(PARAM_RESOURCE_KEY, domain.createVKey().stringify())
            .param(PARAM_REQUESTED_TIME, clock.nowUtc().toString())
            .etaDelta(
                registry.getAutomaticTransferLength().minus(standardSeconds(30)),

@@ -16,6 +16,7 @@ package google.registry.flows.host;

import static com.google.common.base.Strings.nullToEmpty;
import static com.google.common.truth.Truth.assertThat;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_HOST_KEY;
import static google.registry.batch.AsyncTaskEnqueuer.QUEUE_ASYNC_HOST_RENAME;
import static google.registry.model.EppResourceUtils.loadByForeignKey;
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
@@ -230,7 +231,7 @@ class HostUpdateFlowTest extends ResourceFlowTestCase<HostUpdateFlow, HostResour
    assertTasksEnqueued(
        QUEUE_ASYNC_HOST_RENAME,
        new TaskMatcher()
            .param("hostKey", renamedHost.createVKey().getOfyKey().getString())
            .param(PARAM_HOST_KEY, renamedHost.createVKey().stringify())
            .param("requestedTime", clock.nowUtc().toString()));
  }

@@ -14,6 +14,7 @@

package google.registry.model.tld.label;

import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth8.assertThat;
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
@@ -24,14 +25,18 @@ import static org.joda.money.CurrencyUnit.USD;
import static org.joda.time.Duration.standardDays;
import static org.junit.jupiter.api.Assertions.assertThrows;

import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.flogger.FluentLogger;
import google.registry.persistence.transaction.TransactionManagerUtil;
import google.registry.testing.AppEngineExtension;
import google.registry.testing.FakeClock;
import google.registry.testing.TestCacheExtension;
import java.math.BigDecimal;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.stream.IntStream;
import org.joda.money.CurrencyUnit;
import org.joda.money.Money;
import org.junit.jupiter.api.BeforeEach;
@@ -41,6 +46,8 @@ import org.junit.jupiter.api.extension.RegisterExtension;
/** Unit tests for {@link PremiumListDao}. */
public class PremiumListDaoTest {

  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  private final FakeClock fakeClock = new FakeClock();

  @RegisterExtension
@@ -260,6 +267,27 @@ public class PremiumListDaoTest {
    assertThat(PremiumListDao.premiumListCache.getIfPresent("testname")).isNull();
  }

  @Test
  void testSave_largeSize_savedQuickly() {
    Stopwatch stopwatch = Stopwatch.createStarted();
    ImmutableMap<String, BigDecimal> prices =
        IntStream.range(0, 20000).boxed().collect(toImmutableMap(String::valueOf, BigDecimal::new));
    PremiumList list =
        new PremiumList.Builder()
            .setName("testname")
            .setCurrency(USD)
            .setLabelsToPrices(prices)
            .setCreationTimestamp(fakeClock.nowUtc())
            .build();
    PremiumListDao.save(list);
    long duration = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS);
    if (duration >= 6000) {
      // Don't fail directly since we can't rely on what sort of machines the test is running on.
      logger.atSevere().log(
          "Expected premium list update to take 2-3 seconds but it took %d ms", duration);
    }
  }

  private static Money moneyOf(CurrencyUnit unit, double amount) {
    return Money.of(unit, BigDecimal.valueOf(amount).setScale(unit.getDecimalPlaces()));
  }

@@ -29,6 +29,7 @@ import com.googlecode.objectify.annotation.Entity;
import google.registry.model.billing.BillingEvent.OneTime;
import google.registry.model.common.ClassPathManager;
import google.registry.model.domain.DomainBase;
import google.registry.model.host.HostResource;
import google.registry.model.registrar.RegistrarContact;
import google.registry.testing.AppEngineExtension;
import google.registry.testing.TaskQueueHelper.TaskMatcher;
@@ -207,6 +208,29 @@ class VKeyTest {
        .isEqualTo(VKey.createSql(TestObject.class, "foo"));
  }

  @Test
  void testCreate_stringifiedVKey_resourceKeyFromTaskQueue() throws Exception {
    VKey<HostResource> vkeyFromNewWebsafeKey =
        VKey.create(
            "kind:HostResource@sql:rO0ABXQADzZCQjJGNDc2LUdPT0dMRQ@ofy:ahdzfm"
                + "RvbWFpbi1yZWdpc3RyeS1hbHBoYXIhCxIMSG9zdFJlc291cmNlIg82QkIyRjQ3Ni1HT09HTEUM");

    assertThat(vkeyFromNewWebsafeKey.getSqlKey()).isEqualTo("6BB2F476-GOOGLE");
    assertThat(vkeyFromNewWebsafeKey.getOfyKey().getString())
        .isEqualTo(
            "ahdzfmRvbWFpb"
                + "i1yZWdpc3RyeS1hbHBoYXIhCxIMSG9zdFJlc291cmNlIg82QkIyRjQ3Ni1HT09HTEUM");

    // The ofy portion of the new vkey string representation is the old websafe string
    // representation.
    VKey<HostResource> vkeyFromOldWebsafeString =
        VKey.fromWebsafeKey(
            "ahdzfmRvbW"
                + "Fpbi1yZWdpc3RyeS1hbHBoYXIhCxIMSG9zdFJlc291cmNlIg82QkIyRjQ3Ni1HT09HTEUM");

    // The following assertion ensures backward compatibility.
    assertThat(vkeyFromNewWebsafeKey).isEqualTo(vkeyFromOldWebsafeString);
  }
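
  // A minimal round-trip sketch, hedged: it assumes only that stringify() and
  // VKey.create(String) are inverses for a SQL-only key, as the surrounding
  // tests suggest. The test name is illustrative, not part of this change.
  @Test
  void testStringify_roundTrip_sketch() {
    VKey<TestObject> key = VKey.createSql(TestObject.class, "foo");
    assertThat(VKey.create(key.stringify())).isEqualTo(key);
  }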

  @Test
  void testCreate_stringifedVKey_ofyOnlyVKeyString() {
    assertThat(VKey.create("kind:TestObject@ofy:agR0ZXN0chMLEgpUZXN0T2JqZWN0IgNmb28M"))

@@ -227,22 +227,20 @@ public class CloudTasksHelper implements Serializable {
          });
      headers = headerBuilder.build();
      ImmutableMultimap.Builder<String, String> paramBuilder = new ImmutableMultimap.Builder<>();
      String query = null;
      // Note that UriParameters.parse() does not throw an IAE on a bad query string (e.g. one
      // where parameters are not properly URL-encoded); it always does a best-effort parse.
      if (method == HttpMethod.GET) {
        query = uri.getQuery();
      } else if (method == HttpMethod.POST) {
        paramBuilder.putAll(UriParameters.parse(uri.getQuery()));
      } else if (method == HttpMethod.POST && !task.getAppEngineHttpRequest().getBody().isEmpty()) {
        assertThat(
                headers.containsEntry(
                    Ascii.toLowerCase(HttpHeaders.CONTENT_TYPE), MediaType.FORM_DATA.toString()))
            .isTrue();
        query = task.getAppEngineHttpRequest().getBody().toString(StandardCharsets.UTF_8);
      }
      if (query != null) {
        // Note that UriParameters.parse() does not throw an IAE on a bad query string (e.g. one
        // where parameters are not properly URL-encoded); it always does a best-effort parse.
        paramBuilder.putAll(UriParameters.parse(query));
        params = paramBuilder.build();
        paramBuilder.putAll(
            UriParameters.parse(
                task.getAppEngineHttpRequest().getBody().toString(StandardCharsets.UTF_8)));
      }
      params = paramBuilder.build();
    }
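
    // A hedged distillation of the new extraction flow shown in the hunk above
    // (the helper name and signature are illustrative assumptions; the types and
    // calls are taken from the existing code): GET tasks carry parameters in the
    // URI query string, POST tasks carry them form-encoded in the request body,
    // and both paths funnel through the same best-effort UriParameters.parse().
    private static ImmutableMultimap<String, String> parseTaskParamsSketch(
        HttpMethod method, URI uri, Task task) {
      ImmutableMultimap.Builder<String, String> builder = new ImmutableMultimap.Builder<>();
      String query = null;
      if (method == HttpMethod.GET) {
        query = uri.getQuery();
      } else if (method == HttpMethod.POST
          && !task.getAppEngineHttpRequest().getBody().isEmpty()) {
        query = task.getAppEngineHttpRequest().getBody().toString(StandardCharsets.UTF_8);
      }
      if (query != null) {
        builder.putAll(UriParameters.parse(query));
      }
      return builder.build();
    }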

    public Map<String, Object> toMap() {

@@ -60,7 +60,6 @@ import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Streams;
import com.google.common.net.InetAddresses;
import com.googlecode.objectify.Key;
import google.registry.dns.writer.VoidDnsWriter;
@@ -1264,7 +1263,7 @@ public class DatabaseHelper {

  /** Returns the entire map of {@link PremiumEntry}s for the given {@link PremiumList}. */
  public static ImmutableMap<String, PremiumEntry> loadPremiumEntries(PremiumList premiumList) {
    return Streams.stream(PremiumListDao.loadAllPremiumEntries(premiumList.getName()))
    return PremiumListDao.loadAllPremiumEntries(premiumList.getName()).stream()
        .collect(toImmutableMap(PremiumEntry::getDomainLabel, Function.identity()));
  }

@@ -134,7 +134,7 @@ public abstract class CommandTestCase<C extends Command> {
  }

  /** Writes the data to a named temporary file and then returns a path to the file. */
  private String writeToNamedTmpFile(String filename, byte[] data) throws IOException {
  protected String writeToNamedTmpFile(String filename, byte[] data) throws IOException {
    Path tmpFile = tmpDir.resolve(filename);
    Files.write(data, tmpFile.toFile());
    return tmpFile.toString();
@@ -151,7 +151,7 @@ public abstract class CommandTestCase<C extends Command> {
  }

  /** Writes the data to a temporary file and then returns a path to the file. */
  String writeToTmpFile(byte[] data) throws IOException {
  public String writeToTmpFile(byte[] data) throws IOException {
    return writeToNamedTmpFile("tmp_file", data);
  }

@@ -220,7 +220,7 @@ public abstract class CommandTestCase<C extends Command> {
    assertThat(getStderrAsString()).doesNotContain(expected);
  }

  String getStdoutAsString() {
  protected String getStdoutAsString() {
    return new String(stdout.toByteArray(), UTF_8);
  }

Some files were not shown because too many files have changed in this diff.