mirror of
https://github.com/google/nomulus
synced 2026-05-17 13:21:48 +00:00
Compare commits
59 Commits
nomulus-20
...
nomulus-20
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bcc1924b24 | ||
|
|
f86936788e | ||
|
|
13f61dd7b9 | ||
|
|
17cd9ba4f1 | ||
|
|
f983d564f8 | ||
|
|
bdf9124e87 | ||
|
|
d73e557acc | ||
|
|
0e74df82df | ||
|
|
b59d2ae419 | ||
|
|
d7e65d95e6 | ||
|
|
1d96de98c9 | ||
|
|
29bf0f3965 | ||
|
|
5100057dd5 | ||
|
|
1e7260e022 | ||
|
|
73ba96a5d4 | ||
|
|
90db60643e | ||
|
|
98283a67ac | ||
|
|
e70f14001c | ||
|
|
22d3612be3 | ||
|
|
ad8bc05877 | ||
|
|
a3537447ef | ||
|
|
4e66fed497 | ||
|
|
886cdfa39b | ||
|
|
beefa9364b | ||
|
|
73210e4b09 | ||
|
|
08cec96a93 | ||
|
|
31ef402c50 | ||
|
|
e89cc4406a | ||
|
|
48de5d8375 | ||
|
|
59abc1d154 | ||
|
|
6794c6fbd7 | ||
|
|
0c384adc22 | ||
|
|
3b679058b0 | ||
|
|
9b5805f145 | ||
|
|
9e6f99face | ||
|
|
554e675303 | ||
|
|
3d33c81475 | ||
|
|
56e384aa4f | ||
|
|
f669e3ca59 | ||
|
|
c45129f9ac | ||
|
|
84d2b82050 | ||
|
|
0109d5e473 | ||
|
|
9e03ae453c | ||
|
|
7a62aa0602 | ||
|
|
6a1e86ff33 | ||
|
|
5bf618e671 | ||
|
|
b4676a9836 | ||
|
|
ef9f3aeada | ||
|
|
9c43aab8cd | ||
|
|
cb63c3dd80 | ||
|
|
2cf190e448 | ||
|
|
e550c94cbc | ||
|
|
6e2bbd1a7e | ||
|
|
495d7176d8 | ||
|
|
d7aab524e5 | ||
|
|
c5bfe31b73 | ||
|
|
9975bc2195 | ||
|
|
cb16a7649f | ||
|
|
d7e2b24468 |
@@ -34,3 +34,5 @@ Guy Bensky <guyben@google.com>
|
||||
Weimin Yu <weiminyu@google.com>
|
||||
Shicong Huang <shicong@google.com>
|
||||
Gustav Brodman <gbrodman@google.com>
|
||||
Sarah Botwinick <sarahbot@google.com>
|
||||
Legina Chen <legina@google.com>
|
||||
|
||||
@@ -3,16 +3,17 @@
|
||||
# This file is expected to be part of source control.
|
||||
antlr:antlr:2.7.7
|
||||
com.google.code.findbugs:jsr305:3.0.2
|
||||
com.google.errorprone:error_prone_annotations:2.3.2
|
||||
com.google.errorprone:error_prone_annotations:2.3.4
|
||||
com.google.guava:failureaccess:1.0.1
|
||||
com.google.guava:guava:28.1-jre
|
||||
com.google.guava:guava:29.0-jre
|
||||
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
|
||||
com.google.j2objc:j2objc-annotations:1.3
|
||||
com.puppycrawl.tools:checkstyle:8.27
|
||||
com.puppycrawl.tools:checkstyle:8.37
|
||||
commons-beanutils:commons-beanutils:1.9.4
|
||||
commons-collections:commons-collections:3.2.2
|
||||
info.picocli:picocli:4.1.1
|
||||
net.sf.saxon:Saxon-HE:9.9.1-5
|
||||
org.antlr:antlr4-runtime:4.7.2
|
||||
org.checkerframework:checker-qual:2.8.1
|
||||
org.codehaus.mojo:animal-sniffer-annotations:1.18
|
||||
info.picocli:picocli:4.5.2
|
||||
net.sf.saxon:Saxon-HE:10.3
|
||||
org.antlr:antlr4-runtime:4.8-1
|
||||
org.checkerframework:checker-qual:2.11.1
|
||||
org.javassist:javassist:3.26.0-GA
|
||||
org.reflections:reflections:0.9.12
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# This is a Gradle generated file for dependency locking.
|
||||
# Manual edits can break the build and are not advised.
|
||||
# This file is expected to be part of source control.
|
||||
org.jacoco:org.jacoco.agent:0.8.5
|
||||
org.jacoco:org.jacoco.agent:0.8.6
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# This is a Gradle generated file for dependency locking.
|
||||
# Manual edits can break the build and are not advised.
|
||||
# This file is expected to be part of source control.
|
||||
org.jacoco:org.jacoco.agent:0.8.5
|
||||
org.jacoco:org.jacoco.ant:0.8.5
|
||||
org.jacoco:org.jacoco.core:0.8.5
|
||||
org.jacoco:org.jacoco.report:0.8.5
|
||||
org.ow2.asm:asm-analysis:7.2
|
||||
org.ow2.asm:asm-commons:7.2
|
||||
org.ow2.asm:asm-tree:7.2
|
||||
org.ow2.asm:asm:7.2
|
||||
org.jacoco:org.jacoco.agent:0.8.6
|
||||
org.jacoco:org.jacoco.ant:0.8.6
|
||||
org.jacoco:org.jacoco.core:0.8.6
|
||||
org.jacoco:org.jacoco.report:0.8.6
|
||||
org.ow2.asm:asm-analysis:8.0.1
|
||||
org.ow2.asm:asm-commons:8.0.1
|
||||
org.ow2.asm:asm-tree:8.0.1
|
||||
org.ow2.asm:asm:8.0.1
|
||||
|
||||
@@ -3,16 +3,17 @@
|
||||
# This file is expected to be part of source control.
|
||||
antlr:antlr:2.7.7
|
||||
com.google.code.findbugs:jsr305:3.0.2
|
||||
com.google.errorprone:error_prone_annotations:2.3.2
|
||||
com.google.errorprone:error_prone_annotations:2.3.4
|
||||
com.google.guava:failureaccess:1.0.1
|
||||
com.google.guava:guava:28.1-jre
|
||||
com.google.guava:guava:29.0-jre
|
||||
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
|
||||
com.google.j2objc:j2objc-annotations:1.3
|
||||
com.puppycrawl.tools:checkstyle:8.27
|
||||
com.puppycrawl.tools:checkstyle:8.37
|
||||
commons-beanutils:commons-beanutils:1.9.4
|
||||
commons-collections:commons-collections:3.2.2
|
||||
info.picocli:picocli:4.1.1
|
||||
net.sf.saxon:Saxon-HE:9.9.1-5
|
||||
org.antlr:antlr4-runtime:4.7.2
|
||||
org.checkerframework:checker-qual:2.8.1
|
||||
org.codehaus.mojo:animal-sniffer-annotations:1.18
|
||||
info.picocli:picocli:4.5.2
|
||||
net.sf.saxon:Saxon-HE:10.3
|
||||
org.antlr:antlr4-runtime:4.8-1
|
||||
org.checkerframework:checker-qual:2.11.1
|
||||
org.javassist:javassist:3.26.0-GA
|
||||
org.reflections:reflections:0.9.12
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# This is a Gradle generated file for dependency locking.
|
||||
# Manual edits can break the build and are not advised.
|
||||
# This file is expected to be part of source control.
|
||||
org.jacoco:org.jacoco.agent:0.8.5
|
||||
org.jacoco:org.jacoco.agent:0.8.6
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# This is a Gradle generated file for dependency locking.
|
||||
# Manual edits can break the build and are not advised.
|
||||
# This file is expected to be part of source control.
|
||||
org.jacoco:org.jacoco.agent:0.8.5
|
||||
org.jacoco:org.jacoco.ant:0.8.5
|
||||
org.jacoco:org.jacoco.core:0.8.5
|
||||
org.jacoco:org.jacoco.report:0.8.5
|
||||
org.ow2.asm:asm-analysis:7.2
|
||||
org.ow2.asm:asm-commons:7.2
|
||||
org.ow2.asm:asm-tree:7.2
|
||||
org.ow2.asm:asm:7.2
|
||||
org.jacoco:org.jacoco.agent:0.8.6
|
||||
org.jacoco:org.jacoco.ant:0.8.6
|
||||
org.jacoco:org.jacoco.core:0.8.6
|
||||
org.jacoco:org.jacoco.report:0.8.6
|
||||
org.ow2.asm:asm-analysis:8.0.1
|
||||
org.ow2.asm:asm-commons:8.0.1
|
||||
org.ow2.asm:asm-tree:8.0.1
|
||||
org.ow2.asm:asm:8.0.1
|
||||
|
||||
@@ -65,7 +65,7 @@ class PresubmitCheck:
|
||||
for pattern in self.skipped_patterns:
|
||||
if pattern in file:
|
||||
return False
|
||||
with open(file, "r") as f:
|
||||
with open(file, "r", encoding='utf8') as f:
|
||||
file_content = f.read()
|
||||
matches = re.match(self.regex, file_content, re.DOTALL)
|
||||
if self.regex_type == FORBIDDEN:
|
||||
@@ -176,7 +176,44 @@ PRESUBMITS = {
|
||||
"js",
|
||||
{"/node_modules/", "google/registry/ui/js/util.js", "registrar_bin."},
|
||||
):
|
||||
"JavaScript files should not include console logging."
|
||||
"JavaScript files should not include console logging.",
|
||||
# SQL injection protection rule for java source file:
|
||||
# The sql template passed to createQuery/createNativeQuery methods must be
|
||||
# a variable name in UPPER_CASE_UNDERSCORE format, i.e., a static final
|
||||
# String variable. This forces the use of parameter-binding on all queries
|
||||
# that take parameters.
|
||||
# The rule would forbid invocation of createQuery(Criteria). However, this
|
||||
# can be handled by adding a helper method in an exempted class to make
|
||||
# the calls.
|
||||
# TODO(b/179158393): enable the 'ConstantName' Java style check to ensure
|
||||
# that non-final variables do not use the UPPER_CASE_UNDERSCORE format.
|
||||
PresubmitCheck(
|
||||
# Line 1: the method names we check and the opening parenthesis, which
|
||||
# marks the beginning of the first parameter
|
||||
# Line 2: The first parameter is a match if is NOT any of the following:
|
||||
# - final variable name: \s*([A-Z_]+
|
||||
# - string literal: "([^"]|\\")*"
|
||||
# - concatenation of literals: (\s*\+\s*"([^"]|\\")*")*
|
||||
# Line 3: , or the closing parenthesis, marking the end of the first
|
||||
# parameter
|
||||
r'.*\.create(Native)?Query\('
|
||||
r'(?!(\s*([A-Z_]+|"([^"]|\\")*"(\s*\+\s*"([^"]|\\")*")*)'
|
||||
r'(,|\s*\))))',
|
||||
"java",
|
||||
# ActivityReportingQueryBuilder deals with Dremel queries
|
||||
{"src/test", "ActivityReportingQueryBuilder.java",
|
||||
# TODO(b/179158393): Remove everything below, which should be done
|
||||
# using Criteria
|
||||
"ForeignKeyIndex.java",
|
||||
"HistoryEntryDao.java",
|
||||
"JpaTransactionManagerImpl.java",
|
||||
},
|
||||
):
|
||||
"The first String parameter to EntityManager.create(Native)Query "
|
||||
"methods must be one of the following:\n"
|
||||
" - A String literal\n"
|
||||
" - Concatenation of String literals only\n"
|
||||
" - The name of a static final String variable"
|
||||
}
|
||||
|
||||
# Note that this regex only works for one kind of Flyway file. If we want to
|
||||
@@ -241,7 +278,7 @@ def verify_flyway_index():
|
||||
|
||||
# Remove the sequence numbers and compare against the index file contents.
|
||||
files = [filename[1] for filename in sorted(files)]
|
||||
with open('db/src/main/resources/sql/flyway.txt') as index:
|
||||
with open('db/src/main/resources/sql/flyway.txt', encoding='utf8') as index:
|
||||
indexed_files = index.read().splitlines()
|
||||
if files != indexed_files:
|
||||
unindexed = set(files) - set(indexed_files)
|
||||
|
||||
@@ -172,6 +172,8 @@ dependencies {
|
||||
|
||||
compile deps['com.beust:jcommander']
|
||||
compile deps['com.google.api:gax']
|
||||
compile deps['com.google.api.grpc:proto-google-cloud-datastore-v1']
|
||||
compile deps['com.google.api.grpc:proto-google-common-protos']
|
||||
compile deps['com.google.api.grpc:proto-google-cloud-secretmanager-v1']
|
||||
compile deps['com.google.api-client:google-api-client']
|
||||
compile deps['com.google.api-client:google-api-client-appengine']
|
||||
@@ -196,6 +198,8 @@ dependencies {
|
||||
compile deps['com.google.appengine:appengine-remote-api']
|
||||
compile deps['com.google.auth:google-auth-library-credentials']
|
||||
compile deps['com.google.auth:google-auth-library-oauth2-http']
|
||||
compile deps['com.google.cloud.bigdataoss:util']
|
||||
compile deps['com.google.cloud.datastore:datastore-v1-proto-client']
|
||||
compile deps['com.google.cloud.sql:jdbc-socket-factory-core']
|
||||
runtimeOnly deps['com.google.cloud.sql:postgres-socket-factory']
|
||||
compile deps['com.google.cloud:google-cloud-secretmanager']
|
||||
@@ -600,7 +604,6 @@ task compileProdJS(type: JavaExec) {
|
||||
closureArgs << "--js=${jsDir}/soyutils_usegoog.js"
|
||||
closureArgs << "--js=${cssSourceDir}/registrar_bin.css.js"
|
||||
closureArgs << "--js=${jsSourceDir}/**.js"
|
||||
// TODO(shicong) Verify the compiled JS file works in Alpha
|
||||
closureArgs << "--js=${externsDir}/json.js"
|
||||
closureArgs << "--js=${soySourceDir}/**.js"
|
||||
args closureArgs
|
||||
@@ -737,6 +740,13 @@ project.tasks.create('initSqlPipeline', JavaExec) {
|
||||
}
|
||||
}
|
||||
|
||||
// Caller must provide projectId, GCP region, runner, and the kinds to delete
|
||||
// (comma-separated kind names or '*' for all). E.g.:
|
||||
// nom_build :core:bulkDeleteDatastore --args="--project=domain-registry-crash \
|
||||
// --region=us-central1 --runner=DataflowRunner --kindsToDelete=*"
|
||||
createToolTask(
|
||||
'bulkDeleteDatastore', 'google.registry.beam.datastore.BulkDeletePipeline')
|
||||
|
||||
project.tasks.create('generateSqlSchema', JavaExec) {
|
||||
classpath = sourceSets.nonprod.runtimeClasspath
|
||||
main = 'google.registry.tools.DevTool'
|
||||
@@ -828,9 +838,8 @@ task buildToolImage(dependsOn: nomulus, type: Exec) {
|
||||
commandLine 'docker', 'build', '-t', 'nomulus-tool', '.'
|
||||
}
|
||||
|
||||
task generateInitSqlPipelineGraph(type: Test) {
|
||||
include "**/InitSqlPipelineGraphTest.*"
|
||||
testNameIncludePatterns = ["**createPipeline_compareGraph"]
|
||||
task generateInitSqlPipelineGraph(type: FilteringTest) {
|
||||
tests = ['InitSqlPipelineGraphTest.createPipeline_compareGraph']
|
||||
ignoreFailures = true
|
||||
}
|
||||
|
||||
|
||||
@@ -3,16 +3,17 @@
|
||||
# This file is expected to be part of source control.
|
||||
antlr:antlr:2.7.7
|
||||
com.google.code.findbugs:jsr305:3.0.2
|
||||
com.google.errorprone:error_prone_annotations:2.3.2
|
||||
com.google.errorprone:error_prone_annotations:2.3.4
|
||||
com.google.guava:failureaccess:1.0.1
|
||||
com.google.guava:guava:28.1-jre
|
||||
com.google.guava:guava:29.0-jre
|
||||
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
|
||||
com.google.j2objc:j2objc-annotations:1.3
|
||||
com.puppycrawl.tools:checkstyle:8.27
|
||||
com.puppycrawl.tools:checkstyle:8.37
|
||||
commons-beanutils:commons-beanutils:1.9.4
|
||||
commons-collections:commons-collections:3.2.2
|
||||
info.picocli:picocli:4.1.1
|
||||
net.sf.saxon:Saxon-HE:9.9.1-5
|
||||
org.antlr:antlr4-runtime:4.7.2
|
||||
org.checkerframework:checker-qual:2.8.1
|
||||
org.codehaus.mojo:animal-sniffer-annotations:1.18
|
||||
info.picocli:picocli:4.5.2
|
||||
net.sf.saxon:Saxon-HE:10.3
|
||||
org.antlr:antlr4-runtime:4.8-1
|
||||
org.checkerframework:checker-qual:2.11.1
|
||||
org.javassist:javassist:3.26.0-GA
|
||||
org.reflections:reflections:0.9.12
|
||||
|
||||
@@ -6,10 +6,13 @@ aopalliance:aopalliance:1.0
|
||||
args4j:args4j:2.33
|
||||
cglib:cglib-nodep:2.2
|
||||
com.beust:jcommander:1.60
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.2
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.3
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.10.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -121,8 +124,6 @@ com.googlecode.charts4j:charts4j:1.3
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.13.0
|
||||
com.sun.istack:istack-commons-runtime:3.0.7
|
||||
@@ -180,7 +181,6 @@ jline:jline:1.0
|
||||
joda-time:joda-time:2.10.5
|
||||
junit:junit:4.12
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -226,7 +226,6 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
@@ -242,13 +241,11 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -6,10 +6,13 @@ aopalliance:aopalliance:1.0
|
||||
args4j:args4j:2.33
|
||||
cglib:cglib-nodep:2.2
|
||||
com.beust:jcommander:1.60
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.2
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.3
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.10.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -119,8 +122,6 @@ com.googlecode.charts4j:charts4j:1.3
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.13.0
|
||||
com.sun.istack:istack-commons-runtime:3.0.7
|
||||
@@ -176,7 +177,6 @@ javax.xml.bind:jaxb-api:2.3.1
|
||||
jline:jline:1.0
|
||||
joda-time:joda-time:2.10.5
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -220,7 +220,6 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
@@ -236,13 +235,11 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -10,10 +10,13 @@ com.eclipsesource.j2v8:j2v8_linux_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_macosx_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86_64:4.6.0
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.2
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.3
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.10.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -126,8 +129,6 @@ com.googlecode.charts4j:charts4j:1.3
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.13.0
|
||||
com.sun.istack:istack-commons-runtime:3.0.7
|
||||
@@ -189,7 +190,6 @@ joda-time:joda-time:2.10.5
|
||||
junit:junit:4.12
|
||||
net.arnx:nashorn-promise:0.1.1
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -237,7 +237,6 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
@@ -253,16 +252,14 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.slf4j:jcl-over-slf4j:1.7.30
|
||||
org.slf4j:jul-to-slf4j:1.7.30
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.slf4j:slf4j-jdk14:1.7.28
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -10,10 +10,13 @@ com.eclipsesource.j2v8:j2v8_linux_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_macosx_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86_64:4.6.0
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.2
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.3
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.10.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -126,8 +129,6 @@ com.googlecode.charts4j:charts4j:1.3
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.13.0
|
||||
com.sun.istack:istack-commons-runtime:3.0.7
|
||||
@@ -188,7 +189,6 @@ jline:jline:1.0
|
||||
joda-time:joda-time:2.10.5
|
||||
net.arnx:nashorn-promise:0.1.1
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -235,7 +235,6 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
@@ -251,16 +250,14 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.slf4j:jcl-over-slf4j:1.7.30
|
||||
org.slf4j:jul-to-slf4j:1.7.30
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.slf4j:slf4j-jdk14:1.7.28
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# This is a Gradle generated file for dependency locking.
|
||||
# Manual edits can break the build and are not advised.
|
||||
# This file is expected to be part of source control.
|
||||
org.jacoco:org.jacoco.agent:0.8.5
|
||||
org.jacoco:org.jacoco.agent:0.8.6
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# This is a Gradle generated file for dependency locking.
|
||||
# Manual edits can break the build and are not advised.
|
||||
# This file is expected to be part of source control.
|
||||
org.jacoco:org.jacoco.agent:0.8.5
|
||||
org.jacoco:org.jacoco.ant:0.8.5
|
||||
org.jacoco:org.jacoco.core:0.8.5
|
||||
org.jacoco:org.jacoco.report:0.8.5
|
||||
org.ow2.asm:asm-analysis:7.2
|
||||
org.ow2.asm:asm-commons:7.2
|
||||
org.ow2.asm:asm-tree:7.2
|
||||
org.ow2.asm:asm:7.2
|
||||
org.jacoco:org.jacoco.agent:0.8.6
|
||||
org.jacoco:org.jacoco.ant:0.8.6
|
||||
org.jacoco:org.jacoco.core:0.8.6
|
||||
org.jacoco:org.jacoco.report:0.8.6
|
||||
org.ow2.asm:asm-analysis:8.0.1
|
||||
org.ow2.asm:asm-commons:8.0.1
|
||||
org.ow2.asm:asm-tree:8.0.1
|
||||
org.ow2.asm:asm:8.0.1
|
||||
|
||||
@@ -6,10 +6,13 @@ aopalliance:aopalliance:1.0
|
||||
args4j:args4j:2.33
|
||||
cglib:cglib-nodep:2.2
|
||||
com.beust:jcommander:1.60
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.2
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.3
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.10.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -121,8 +124,6 @@ com.googlecode.charts4j:charts4j:1.3
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.13.0
|
||||
com.sun.istack:istack-commons-runtime:3.0.7
|
||||
@@ -180,7 +181,6 @@ jline:jline:1.0
|
||||
joda-time:joda-time:2.10.5
|
||||
junit:junit:4.12
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -226,7 +226,6 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
@@ -242,13 +241,11 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -6,10 +6,13 @@ aopalliance:aopalliance:1.0
|
||||
args4j:args4j:2.33
|
||||
cglib:cglib-nodep:2.2
|
||||
com.beust:jcommander:1.60
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.2
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.3
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.10.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -119,8 +122,6 @@ com.googlecode.charts4j:charts4j:1.3
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.13.0
|
||||
com.sun.istack:istack-commons-runtime:3.0.7
|
||||
@@ -177,7 +178,6 @@ jline:jline:1.0
|
||||
joda-time:joda-time:2.10.5
|
||||
junit:junit:4.12
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -222,7 +222,6 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
@@ -238,13 +237,11 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -10,10 +10,13 @@ com.eclipsesource.j2v8:j2v8_linux_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_macosx_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86_64:4.6.0
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.2
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.3
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.10.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -125,8 +128,6 @@ com.googlecode.charts4j:charts4j:1.3
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.13.0
|
||||
com.sun.istack:istack-commons-runtime:3.0.7
|
||||
@@ -188,7 +189,6 @@ joda-time:joda-time:2.10.5
|
||||
junit:junit:4.12
|
||||
net.arnx:nashorn-promise:0.1.1
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -236,7 +236,6 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
@@ -252,15 +251,13 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.slf4j:jcl-over-slf4j:1.7.30
|
||||
org.slf4j:jul-to-slf4j:1.7.30
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -10,10 +10,13 @@ com.eclipsesource.j2v8:j2v8_linux_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_macosx_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86_64:4.6.0
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.2
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.3
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.10.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -125,8 +128,6 @@ com.googlecode.charts4j:charts4j:1.3
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.13.0
|
||||
com.sun.istack:istack-commons-runtime:3.0.7
|
||||
@@ -188,7 +189,6 @@ joda-time:joda-time:2.10.5
|
||||
junit:junit:4.12
|
||||
net.arnx:nashorn-promise:0.1.1
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -236,7 +236,6 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
@@ -252,15 +251,13 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.slf4j:jcl-over-slf4j:1.7.30
|
||||
org.slf4j:jul-to-slf4j:1.7.30
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -10,10 +10,13 @@ com.eclipsesource.j2v8:j2v8_linux_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_macosx_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86_64:4.6.0
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.2
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.3
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.10.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -125,8 +128,6 @@ com.googlecode.charts4j:charts4j:1.3
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.13.0
|
||||
com.sun.istack:istack-commons-runtime:3.0.7
|
||||
@@ -188,7 +189,6 @@ joda-time:joda-time:2.10.5
|
||||
junit:junit:4.12
|
||||
net.arnx:nashorn-promise:0.1.1
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -236,7 +236,6 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
@@ -252,15 +251,13 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.slf4j:jcl-over-slf4j:1.7.30
|
||||
org.slf4j:jul-to-slf4j:1.7.30
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -10,10 +10,13 @@ com.eclipsesource.j2v8:j2v8_linux_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_macosx_x86_64:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86:4.6.0
|
||||
com.eclipsesource.j2v8:j2v8_win32_x86_64:4.6.0
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.2
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.10.3
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.10.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -126,8 +129,6 @@ com.googlecode.charts4j:charts4j:1.3
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.13.0
|
||||
com.sun.istack:istack-commons-runtime:3.0.7
|
||||
@@ -188,7 +189,6 @@ jline:jline:1.0
|
||||
joda-time:joda-time:2.10.5
|
||||
net.arnx:nashorn-promise:0.1.1
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -235,7 +235,6 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
@@ -251,16 +250,14 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.slf4j:jcl-over-slf4j:1.7.30
|
||||
org.slf4j:jul-to-slf4j:1.7.30
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.slf4j:slf4j-jdk14:1.7.28
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -10,6 +10,9 @@ com.fasterxml.jackson.core:jackson-annotations:2.11.2
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.11.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -127,8 +130,6 @@ com.googlecode.java-diff-utils:diffutils:1.3.0
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp3:okhttp:3.11.0
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.14.0
|
||||
@@ -189,7 +190,6 @@ joda-time:joda-time:2.10.5
|
||||
junit:junit:4.13
|
||||
net.bytebuddy:byte-buddy-agent:1.10.5
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -248,21 +248,20 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
org.junit-pioneer:junit-pioneer:0.7.0
|
||||
org.junit.jupiter:junit-jupiter-api:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-engine:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-migrationsupport:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-params:5.6.2
|
||||
org.junit.platform:junit-platform-commons:1.6.2
|
||||
org.junit.platform:junit-platform-engine:1.6.2
|
||||
org.junit.platform:junit-platform-launcher:1.6.2
|
||||
org.junit.platform:junit-platform-runner:1.6.2
|
||||
org.junit.platform:junit-platform-suite-api:1.6.2
|
||||
org.junit:junit-bom:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-api:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-engine:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-migrationsupport:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-params:5.7.0
|
||||
org.junit.platform:junit-platform-commons:1.7.0
|
||||
org.junit.platform:junit-platform-engine:1.7.0
|
||||
org.junit.platform:junit-platform-launcher:1.7.0
|
||||
org.junit.platform:junit-platform-runner:1.7.0
|
||||
org.junit.platform:junit-platform-suite-api:1.7.0
|
||||
org.junit:junit-bom:5.7.0
|
||||
org.jvnet.staxex:stax-ex:1.8
|
||||
org.mockito:mockito-core:3.3.3
|
||||
org.mockito:mockito-junit-jupiter:3.3.3
|
||||
@@ -278,8 +277,6 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.seleniumhq.selenium:selenium-api:3.141.59
|
||||
org.seleniumhq.selenium:selenium-chrome-driver:3.141.59
|
||||
org.seleniumhq.selenium:selenium-edge-driver:3.141.59
|
||||
@@ -291,12 +288,12 @@ org.seleniumhq.selenium:selenium-remote-driver:3.141.59
|
||||
org.seleniumhq.selenium:selenium-safari-driver:3.141.59
|
||||
org.seleniumhq.selenium:selenium-support:3.141.59
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:junit-jupiter:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:selenium:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:junit-jupiter:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:selenium:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -10,6 +10,9 @@ com.fasterxml.jackson.core:jackson-annotations:2.11.2
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.11.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -125,8 +128,6 @@ com.googlecode.java-diff-utils:diffutils:1.3.0
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp3:okhttp:3.11.0
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.14.0
|
||||
@@ -186,7 +187,6 @@ joda-time:joda-time:2.10.5
|
||||
junit:junit:4.13
|
||||
net.bytebuddy:byte-buddy-agent:1.10.5
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -244,21 +244,20 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
org.junit-pioneer:junit-pioneer:0.7.0
|
||||
org.junit.jupiter:junit-jupiter-api:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-engine:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-migrationsupport:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-params:5.6.2
|
||||
org.junit.platform:junit-platform-commons:1.6.2
|
||||
org.junit.platform:junit-platform-engine:1.6.2
|
||||
org.junit.platform:junit-platform-launcher:1.6.2
|
||||
org.junit.platform:junit-platform-runner:1.6.2
|
||||
org.junit.platform:junit-platform-suite-api:1.6.2
|
||||
org.junit:junit-bom:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-api:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-engine:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-migrationsupport:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-params:5.7.0
|
||||
org.junit.platform:junit-platform-commons:1.7.0
|
||||
org.junit.platform:junit-platform-engine:1.7.0
|
||||
org.junit.platform:junit-platform-launcher:1.7.0
|
||||
org.junit.platform:junit-platform-runner:1.7.0
|
||||
org.junit.platform:junit-platform-suite-api:1.7.0
|
||||
org.junit:junit-bom:5.7.0
|
||||
org.jvnet.staxex:stax-ex:1.8
|
||||
org.mockito:mockito-core:3.3.3
|
||||
org.mockito:mockito-junit-jupiter:3.3.3
|
||||
@@ -274,8 +273,6 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.seleniumhq.selenium:selenium-api:3.141.59
|
||||
org.seleniumhq.selenium:selenium-chrome-driver:3.141.59
|
||||
org.seleniumhq.selenium:selenium-edge-driver:3.141.59
|
||||
@@ -287,12 +284,12 @@ org.seleniumhq.selenium:selenium-remote-driver:3.141.59
|
||||
org.seleniumhq.selenium:selenium-safari-driver:3.141.59
|
||||
org.seleniumhq.selenium:selenium-support:3.141.59
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:junit-jupiter:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:selenium:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:junit-jupiter:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:selenium:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -14,6 +14,9 @@ com.fasterxml.jackson.core:jackson-annotations:2.11.2
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.11.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -132,8 +135,6 @@ com.googlecode.java-diff-utils:diffutils:1.3.0
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp3:okhttp:3.11.0
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.14.0
|
||||
@@ -199,7 +200,6 @@ junit:junit:4.13
|
||||
net.arnx:nashorn-promise:0.1.1
|
||||
net.bytebuddy:byte-buddy-agent:1.10.5
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -260,21 +260,20 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
org.junit-pioneer:junit-pioneer:0.7.0
|
||||
org.junit.jupiter:junit-jupiter-api:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-engine:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-migrationsupport:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-params:5.6.2
|
||||
org.junit.platform:junit-platform-commons:1.6.2
|
||||
org.junit.platform:junit-platform-engine:1.6.2
|
||||
org.junit.platform:junit-platform-launcher:1.6.2
|
||||
org.junit.platform:junit-platform-runner:1.6.2
|
||||
org.junit.platform:junit-platform-suite-api:1.6.2
|
||||
org.junit:junit-bom:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-api:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-engine:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-migrationsupport:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-params:5.7.0
|
||||
org.junit.platform:junit-platform-commons:1.7.0
|
||||
org.junit.platform:junit-platform-engine:1.7.0
|
||||
org.junit.platform:junit-platform-launcher:1.7.0
|
||||
org.junit.platform:junit-platform-runner:1.7.0
|
||||
org.junit.platform:junit-platform-suite-api:1.7.0
|
||||
org.junit:junit-bom:5.7.0
|
||||
org.jvnet.staxex:stax-ex:1.8
|
||||
org.mockito:mockito-core:3.3.3
|
||||
org.mockito:mockito-junit-jupiter:3.3.3
|
||||
@@ -290,8 +289,6 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.seleniumhq.selenium:selenium-api:3.141.59
|
||||
org.seleniumhq.selenium:selenium-chrome-driver:3.141.59
|
||||
org.seleniumhq.selenium:selenium-edge-driver:3.141.59
|
||||
@@ -305,12 +302,12 @@ org.seleniumhq.selenium:selenium-support:3.141.59
|
||||
org.slf4j:jcl-over-slf4j:1.7.30
|
||||
org.slf4j:jul-to-slf4j:1.7.30
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:junit-jupiter:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:selenium:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:junit-jupiter:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:selenium:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -14,6 +14,9 @@ com.fasterxml.jackson.core:jackson-annotations:2.11.2
|
||||
com.fasterxml.jackson.core:jackson-core:2.11.3
|
||||
com.fasterxml.jackson.core:jackson-databind:2.11.2
|
||||
com.fasterxml:classmate:1.5.1
|
||||
com.github.docker-java:docker-java-api:3.2.7
|
||||
com.github.docker-java:docker-java-transport-zerodep:3.2.7
|
||||
com.github.docker-java:docker-java-transport:3.2.7
|
||||
com.github.jnr:jffi:1.2.23
|
||||
com.github.jnr:jnr-a64asm:1.0.0
|
||||
com.github.jnr:jnr-constants:0.9.15
|
||||
@@ -132,8 +135,6 @@ com.googlecode.java-diff-utils:diffutils:1.3.0
|
||||
com.googlecode.json-simple:json-simple:1.1.1
|
||||
com.ibm.icu:icu4j:57.1
|
||||
com.jcraft:jsch:0.1.55
|
||||
com.kohlschutter.junixsocket:junixsocket-common:2.0.4
|
||||
com.kohlschutter.junixsocket:junixsocket-native-common:2.0.4
|
||||
com.squareup.okhttp3:okhttp:3.11.0
|
||||
com.squareup.okhttp:okhttp:2.5.0
|
||||
com.squareup.okio:okio:1.14.0
|
||||
@@ -199,7 +200,6 @@ junit:junit:4.13
|
||||
net.arnx:nashorn-promise:0.1.1
|
||||
net.bytebuddy:byte-buddy-agent:1.10.5
|
||||
net.bytebuddy:byte-buddy:1.10.17
|
||||
net.java.dev.jna:jna-platform:5.5.0
|
||||
net.java.dev.jna:jna:5.5.0
|
||||
org.apache.avro:avro:1.8.2
|
||||
org.apache.beam:beam-model-fn-execution:2.23.0
|
||||
@@ -260,21 +260,20 @@ org.javassist:javassist:3.24.0-GA
|
||||
org.jboss.logging:jboss-logging:3.4.1.Final
|
||||
org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.1.1.Final
|
||||
org.jboss:jandex:2.1.3.Final
|
||||
org.jetbrains:annotations:19.0.0
|
||||
org.joda:joda-money:1.0.1
|
||||
org.json:json:20160810
|
||||
org.jsoup:jsoup:1.13.1
|
||||
org.junit-pioneer:junit-pioneer:0.7.0
|
||||
org.junit.jupiter:junit-jupiter-api:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-engine:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-migrationsupport:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-params:5.6.2
|
||||
org.junit.platform:junit-platform-commons:1.6.2
|
||||
org.junit.platform:junit-platform-engine:1.6.2
|
||||
org.junit.platform:junit-platform-launcher:1.6.2
|
||||
org.junit.platform:junit-platform-runner:1.6.2
|
||||
org.junit.platform:junit-platform-suite-api:1.6.2
|
||||
org.junit:junit-bom:5.6.2
|
||||
org.junit.jupiter:junit-jupiter-api:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-engine:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-migrationsupport:5.7.0
|
||||
org.junit.jupiter:junit-jupiter-params:5.7.0
|
||||
org.junit.platform:junit-platform-commons:1.7.0
|
||||
org.junit.platform:junit-platform-engine:1.7.0
|
||||
org.junit.platform:junit-platform-launcher:1.7.0
|
||||
org.junit.platform:junit-platform-runner:1.7.0
|
||||
org.junit.platform:junit-platform-suite-api:1.7.0
|
||||
org.junit:junit-bom:5.7.0
|
||||
org.jvnet.staxex:stax-ex:1.8
|
||||
org.mockito:mockito-core:3.3.3
|
||||
org.mockito:mockito-junit-jupiter:3.3.3
|
||||
@@ -290,8 +289,6 @@ org.ow2.asm:asm:8.0.1
|
||||
org.postgresql:postgresql:42.2.18
|
||||
org.rnorth.duct-tape:duct-tape:1.0.8
|
||||
org.rnorth.visible-assertions:visible-assertions:2.1.2
|
||||
org.rnorth:tcp-unix-socket-proxy:1.0.2
|
||||
org.scijava:native-lib-loader:2.0.2
|
||||
org.seleniumhq.selenium:selenium-api:3.141.59
|
||||
org.seleniumhq.selenium:selenium-chrome-driver:3.141.59
|
||||
org.seleniumhq.selenium:selenium-edge-driver:3.141.59
|
||||
@@ -306,12 +303,12 @@ org.slf4j:jcl-over-slf4j:1.7.30
|
||||
org.slf4j:jul-to-slf4j:1.7.30
|
||||
org.slf4j:slf4j-api:1.7.30
|
||||
org.slf4j:slf4j-jdk14:1.7.28
|
||||
org.testcontainers:database-commons:1.14.3
|
||||
org.testcontainers:jdbc:1.14.3
|
||||
org.testcontainers:junit-jupiter:1.14.3
|
||||
org.testcontainers:postgresql:1.14.3
|
||||
org.testcontainers:selenium:1.14.3
|
||||
org.testcontainers:testcontainers:1.14.3
|
||||
org.testcontainers:database-commons:1.15.1
|
||||
org.testcontainers:jdbc:1.15.1
|
||||
org.testcontainers:junit-jupiter:1.15.1
|
||||
org.testcontainers:postgresql:1.15.1
|
||||
org.testcontainers:selenium:1.15.1
|
||||
org.testcontainers:testcontainers:1.15.1
|
||||
org.threeten:threetenbp:1.4.5
|
||||
org.tukaani:xz:1.8
|
||||
org.w3c.css:sac:1.3
|
||||
|
||||
@@ -85,7 +85,7 @@ public class ReplayCommitLogsToSqlAction implements Runnable {
|
||||
Optional<Lock> lock =
|
||||
Lock.acquire(
|
||||
this.getClass().getSimpleName(), null, LEASE_LENGTH, requestStatusChecker, false);
|
||||
if (lock.isEmpty()) {
|
||||
if (!lock.isPresent()) {
|
||||
String message = "Can't acquire SQL commit log replay lock, aborting.";
|
||||
logger.atSevere().log(message);
|
||||
// App Engine will retry on any non-2xx status code, which we don't want in this case.
|
||||
@@ -182,7 +182,7 @@ public class ReplayCommitLogsToSqlAction implements Runnable {
|
||||
}
|
||||
|
||||
private static int compareByWeight(VersionedEntity a, VersionedEntity b) {
|
||||
return getEntityPriority(a.key().getKind(), a.getEntity().isEmpty())
|
||||
- getEntityPriority(b.key().getKind(), b.getEntity().isEmpty());
|
||||
return getEntityPriority(a.key().getKind(), !a.getEntity().isPresent())
|
||||
- getEntityPriority(b.key().getKind(), !b.getEntity().isPresent());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -345,7 +345,7 @@ public class DeleteContactsAndHostsAction implements Runnable {
|
||||
String resourceClientId = resource.getPersistedCurrentSponsorClientId();
|
||||
if (resource instanceof HostResource && ((HostResource) resource).isSubordinate()) {
|
||||
resourceClientId =
|
||||
tm().load(((HostResource) resource).getSuperordinateDomain())
|
||||
tm().loadByKey(((HostResource) resource).getSuperordinateDomain())
|
||||
.cloneProjectedAtTime(now)
|
||||
.getCurrentSponsorClientId();
|
||||
}
|
||||
@@ -465,7 +465,7 @@ public class DeleteContactsAndHostsAction implements Runnable {
|
||||
if (host.isSubordinate()) {
|
||||
dnsQueue.addHostRefreshTask(host.getHostName());
|
||||
tm().put(
|
||||
tm().load(host.getSuperordinateDomain())
|
||||
tm().loadByKey(host.getSuperordinateDomain())
|
||||
.asBuilder()
|
||||
.removeSubordinateHost(host.getHostName())
|
||||
.build());
|
||||
|
||||
@@ -0,0 +1,193 @@
|
||||
// Copyright 2021 The Nomulus Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package google.registry.batch;
|
||||
|
||||
import static com.google.common.collect.ImmutableList.toImmutableList;
|
||||
import static com.google.common.net.MediaType.PLAIN_TEXT_UTF_8;
|
||||
import static google.registry.flows.FlowUtils.marshalWithLenientRetry;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
import static google.registry.util.DateTimeUtils.END_OF_TIME;
|
||||
import static google.registry.util.ResourceUtils.readResourceUtf8;
|
||||
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||
import static javax.servlet.http.HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
|
||||
import static javax.servlet.http.HttpServletResponse.SC_NO_CONTENT;
|
||||
import static javax.servlet.http.HttpServletResponse.SC_OK;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.flogger.FluentLogger;
|
||||
import google.registry.config.RegistryConfig.Config;
|
||||
import google.registry.flows.EppController;
|
||||
import google.registry.flows.EppRequestSource;
|
||||
import google.registry.flows.PasswordOnlyTransportCredentials;
|
||||
import google.registry.flows.StatelessRequestSessionMetadata;
|
||||
import google.registry.model.domain.DomainBase;
|
||||
import google.registry.model.eppcommon.ProtocolDefinition;
|
||||
import google.registry.model.eppoutput.EppOutput;
|
||||
import google.registry.request.Action;
|
||||
import google.registry.request.Action.Method;
|
||||
import google.registry.request.Response;
|
||||
import google.registry.request.auth.Auth;
|
||||
import google.registry.request.lock.LockHandler;
|
||||
import google.registry.util.Clock;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.Callable;
|
||||
import javax.inject.Inject;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.Duration;
|
||||
|
||||
/**
|
||||
* An action that deletes all non-renewing domains whose expiration dates have now passed.
|
||||
*
|
||||
* <p>The registry runs on an autorenew domain model, so domains don't ever expire naturally; they
|
||||
* are only ever autorenewed. However, in some situations (such as URS) we don't want this to
|
||||
* happen. Thus, the domains are tagged as non-renewing and are deleted by the next daily invocation
|
||||
* of this action once they are past the date at which they were to expire.
|
||||
*
|
||||
* <p>Note that this action works by running a superuser EPP domain delete command, and as a side
|
||||
* effect of when domains are deleted (just past their expiration date), they are invariably in the
|
||||
* autorenew grace period when this happens.
|
||||
*/
|
||||
@Action(
|
||||
service = Action.Service.BACKEND,
|
||||
path = DeleteExpiredDomainsAction.PATH,
|
||||
auth = Auth.AUTH_INTERNAL_OR_ADMIN,
|
||||
method = Method.POST)
|
||||
public class DeleteExpiredDomainsAction implements Runnable {
|
||||
|
||||
public static final String PATH = "/_dr/task/deleteExpiredDomains";
|
||||
private static final String LOCK_NAME = "Delete expired domains";
|
||||
|
||||
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
|
||||
private final EppController eppController;
|
||||
private final String registryAdminClientId;
|
||||
private final Clock clock;
|
||||
private final LockHandler lockHandler;
|
||||
private final Response response;
|
||||
private final String deleteXmlTmpl;
|
||||
|
||||
@Inject
|
||||
DeleteExpiredDomainsAction(
|
||||
EppController eppController,
|
||||
@Config("registryAdminClientId") String registryAdminClientId,
|
||||
Clock clock,
|
||||
LockHandler lockHandler,
|
||||
Response response) {
|
||||
this.eppController = eppController;
|
||||
this.registryAdminClientId = registryAdminClientId;
|
||||
this.clock = clock;
|
||||
this.lockHandler = lockHandler;
|
||||
this.response = response;
|
||||
this.deleteXmlTmpl =
|
||||
readResourceUtf8(DeleteExpiredDomainsAction.class, "delete_expired_domain.xml");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
response.setContentType(PLAIN_TEXT_UTF_8);
|
||||
|
||||
Callable<Void> runner =
|
||||
() -> {
|
||||
try {
|
||||
runLocked();
|
||||
response.setStatus(SC_OK);
|
||||
} catch (Exception e) {
|
||||
response.setStatus(SC_INTERNAL_SERVER_ERROR);
|
||||
response.setPayload("Encountered error; see GCP logs for full details.");
|
||||
}
|
||||
return null;
|
||||
};
|
||||
|
||||
if (!lockHandler.executeWithLocks(runner, null, Duration.standardHours(1), LOCK_NAME)) {
|
||||
// Send a 200-series status code to prevent this conflicting action from retrying.
|
||||
response.setStatus(SC_NO_CONTENT);
|
||||
response.setPayload("Could not acquire lock; already running?");
|
||||
}
|
||||
}
|
||||
|
||||
private void runLocked() {
|
||||
DateTime runTime = clock.nowUtc();
|
||||
logger.atInfo().log(
|
||||
"Deleting non-renewing domains with autorenew end times up through %s.", runTime);
|
||||
|
||||
// Note: This query is (and must be) non-transactional, and thus, is only eventually consistent.
|
||||
ImmutableList<DomainBase> domainsToDelete =
|
||||
ofy().load().type(DomainBase.class).filter("autorenewEndTime <=", runTime).list().stream()
|
||||
// Datastore can't do two inequalities in one query, so the second happens in-memory.
|
||||
.filter(d -> d.getDeletionTime().isEqual(END_OF_TIME))
|
||||
.collect(toImmutableList());
|
||||
if (domainsToDelete.isEmpty()) {
|
||||
logger.atInfo().log("Found 0 domains to delete.");
|
||||
response.setPayload("Found 0 domains to delete.");
|
||||
return;
|
||||
}
|
||||
|
||||
logger.atInfo().log(
|
||||
"Found %d domains to delete: %s.",
|
||||
domainsToDelete.size(),
|
||||
String.join(
|
||||
", ",
|
||||
domainsToDelete.stream().map(DomainBase::getDomainName).collect(toImmutableList())));
|
||||
domainsToDelete.forEach(this::runDomainDeleteFlow);
|
||||
logger.atInfo().log("Finished deleting domains.");
|
||||
response.setPayload("Finished deleting domains.");
|
||||
}
|
||||
|
||||
private void runDomainDeleteFlow(DomainBase domain) {
|
||||
logger.atInfo().log("Attempting to delete domain %s", domain.getDomainName());
|
||||
// Create a new transaction that the flow's execution will be enlisted in that loads the domain
|
||||
// transactionally. This way we can ensure that nothing else has modified the domain in question
|
||||
// in the intervening period since the query above found it.
|
||||
Optional<EppOutput> eppOutput =
|
||||
tm().transact(
|
||||
() -> {
|
||||
DomainBase transDomain = tm().loadByKey(domain.createVKey());
|
||||
if (!domain.getAutorenewEndTime().isPresent()
|
||||
|| domain.getAutorenewEndTime().get().isAfter(tm().getTransactionTime())) {
|
||||
logger.atSevere().log(
|
||||
"Failed to delete domain %s because of its autorenew end time: %s.",
|
||||
transDomain.getDomainName(), transDomain.getAutorenewEndTime());
|
||||
return Optional.empty();
|
||||
} else if (domain.getDeletionTime().isBefore(END_OF_TIME)) {
|
||||
logger.atSevere().log(
|
||||
"Failed to delete domain %s because it was already deleted on %s.",
|
||||
transDomain.getDomainName(), transDomain.getDeletionTime());
|
||||
return Optional.empty();
|
||||
}
|
||||
return Optional.of(
|
||||
eppController.handleEppCommand(
|
||||
new StatelessRequestSessionMetadata(
|
||||
registryAdminClientId,
|
||||
ProtocolDefinition.getVisibleServiceExtensionUris()),
|
||||
new PasswordOnlyTransportCredentials(),
|
||||
EppRequestSource.BACKEND,
|
||||
false,
|
||||
true,
|
||||
deleteXmlTmpl
|
||||
.replace("%DOMAIN%", transDomain.getDomainName())
|
||||
.getBytes(UTF_8)));
|
||||
});
|
||||
|
||||
if (eppOutput.isPresent()) {
|
||||
if (eppOutput.get().isSuccess()) {
|
||||
logger.atInfo().log("Successfully deleted domain %s", domain.getDomainName());
|
||||
} else {
|
||||
logger.atWarning().log(
|
||||
"Failed to delete domain %s; EPP response:\n\n%s",
|
||||
domain.getDomainName(), new String(marshalWithLenientRetry(eppOutput.get()), UTF_8));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
package google.registry.batch;
|
||||
|
||||
import static google.registry.mapreduce.MapreduceRunner.PARAM_FAST;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
@@ -24,6 +25,7 @@ import google.registry.mapreduce.MapreduceRunner;
|
||||
import google.registry.mapreduce.inputs.EppResourceInputs;
|
||||
import google.registry.model.EppResource;
|
||||
import google.registry.request.Action;
|
||||
import google.registry.request.Parameter;
|
||||
import google.registry.request.Response;
|
||||
import google.registry.request.auth.Auth;
|
||||
import javax.inject.Inject;
|
||||
@@ -39,6 +41,14 @@ import javax.inject.Inject;
|
||||
* <p>Because there are no auth settings in the {@link Action} annotation, this command can only be
|
||||
* run internally, or by pretending to be internal by setting the X-AppEngine-QueueName header,
|
||||
* which only admin users can do.
|
||||
*
|
||||
* <p>If the <code>?fast=true</code> querystring parameter is passed, then entities that are not
|
||||
* changed by {@link EppResource#cloneProjectedAtTime} will not be re-saved. This helps prevent
|
||||
* mutation load on the DB and has the beneficial side effect of writing out smaller commit logs.
|
||||
* Note that this does NOT pick up mutations caused by migrations using the {@link
|
||||
* com.googlecode.objectify.annotation.OnLoad} annotation, so if you are running a one-off schema
|
||||
* migration, do not use fast mode. Fast mode defaults to false for this reason, but is used by the
|
||||
* monthly invocation of the mapreduce.
|
||||
*/
|
||||
@Action(
|
||||
service = Action.Service.BACKEND,
|
||||
@@ -48,7 +58,13 @@ public class ResaveAllEppResourcesAction implements Runnable {
|
||||
|
||||
@Inject MapreduceRunner mrRunner;
|
||||
@Inject Response response;
|
||||
@Inject ResaveAllEppResourcesAction() {}
|
||||
|
||||
@Inject
|
||||
@Parameter(PARAM_FAST)
|
||||
boolean isFast;
|
||||
|
||||
@Inject
|
||||
ResaveAllEppResourcesAction() {}
|
||||
|
||||
/**
|
||||
* The number of shards to run the map-only mapreduce on.
|
||||
@@ -66,7 +82,7 @@ public class ResaveAllEppResourcesAction implements Runnable {
|
||||
.setModuleName("backend")
|
||||
.setDefaultMapShards(NUM_SHARDS)
|
||||
.runMapOnly(
|
||||
new ResaveAllEppResourcesActionMapper(),
|
||||
new ResaveAllEppResourcesActionMapper(isFast),
|
||||
ImmutableList.of(EppResourceInputs.createKeyInput(EppResource.class)))
|
||||
.sendLinkToMapreduceConsole(response);
|
||||
}
|
||||
@@ -76,23 +92,33 @@ public class ResaveAllEppResourcesAction implements Runnable {
|
||||
extends Mapper<Key<EppResource>, Void, Void> {
|
||||
|
||||
private static final long serialVersionUID = -7721628665138087001L;
|
||||
public ResaveAllEppResourcesActionMapper() {}
|
||||
|
||||
private final boolean isFast;
|
||||
|
||||
ResaveAllEppResourcesActionMapper(boolean isFast) {
|
||||
this.isFast = isFast;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final void map(final Key<EppResource> resourceKey) {
|
||||
tm()
|
||||
.transact(
|
||||
() -> {
|
||||
EppResource projectedResource =
|
||||
ofy()
|
||||
.load()
|
||||
.key(resourceKey)
|
||||
.now()
|
||||
.cloneProjectedAtTime(tm().getTransactionTime());
|
||||
ofy().save().entity(projectedResource).now();
|
||||
});
|
||||
getContext().incrementCounter(String.format("%s entities re-saved", resourceKey.getKind()));
|
||||
boolean resaved =
|
||||
tm().transact(
|
||||
() -> {
|
||||
EppResource originalResource = ofy().load().key(resourceKey).now();
|
||||
EppResource projectedResource =
|
||||
originalResource.cloneProjectedAtTime(tm().getTransactionTime());
|
||||
if (isFast && originalResource.equals(projectedResource)) {
|
||||
return false;
|
||||
} else {
|
||||
ofy().save().entity(projectedResource).now();
|
||||
return true;
|
||||
}
|
||||
});
|
||||
getContext()
|
||||
.incrementCounter(
|
||||
String.format(
|
||||
"%s entities %s",
|
||||
resourceKey.getKind(), resaved ? "re-saved" : "with no changes skipped"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,17 @@
|
||||
<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
|
||||
<command>
|
||||
<delete>
|
||||
<domain:delete
|
||||
xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
|
||||
<domain:name>%DOMAIN%</domain:name>
|
||||
</domain:delete>
|
||||
</delete>
|
||||
<extension>
|
||||
<metadata:metadata xmlns:metadata="urn:google:params:xml:ns:metadata-1.0">
|
||||
<metadata:reason>Non-renewing domain has reached expiration date.</metadata:reason>
|
||||
<metadata:requestedByRegistrar>false</metadata:requestedByRegistrar>
|
||||
</metadata:metadata>
|
||||
</extension>
|
||||
<clTRID>ABC-12345</clTRID>
|
||||
</command>
|
||||
</epp>
|
||||
@@ -0,0 +1,330 @@
|
||||
// Copyright 2020 The Nomulus Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package google.registry.beam.datastore;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkState;
|
||||
import static org.apache.beam.sdk.values.TypeDescriptors.kvs;
|
||||
import static org.apache.beam.sdk.values.TypeDescriptors.strings;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Splitter;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.ImmutableSortedSet;
|
||||
import com.google.common.flogger.FluentLogger;
|
||||
import com.google.datastore.v1.Entity;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import org.apache.beam.sdk.Pipeline;
|
||||
import org.apache.beam.sdk.extensions.gcp.options.GcpOptions;
|
||||
import org.apache.beam.sdk.io.gcp.datastore.DatastoreIO;
|
||||
import org.apache.beam.sdk.options.Default;
|
||||
import org.apache.beam.sdk.options.Description;
|
||||
import org.apache.beam.sdk.options.PipelineOptionsFactory;
|
||||
import org.apache.beam.sdk.options.Validation;
|
||||
import org.apache.beam.sdk.transforms.Create;
|
||||
import org.apache.beam.sdk.transforms.DoFn;
|
||||
import org.apache.beam.sdk.transforms.GroupByKey;
|
||||
import org.apache.beam.sdk.transforms.MapElements;
|
||||
import org.apache.beam.sdk.transforms.PTransform;
|
||||
import org.apache.beam.sdk.transforms.ParDo;
|
||||
import org.apache.beam.sdk.transforms.Reshuffle;
|
||||
import org.apache.beam.sdk.transforms.View;
|
||||
import org.apache.beam.sdk.values.KV;
|
||||
import org.apache.beam.sdk.values.PBegin;
|
||||
import org.apache.beam.sdk.values.PCollection;
|
||||
import org.apache.beam.sdk.values.PCollectionTuple;
|
||||
import org.apache.beam.sdk.values.PCollectionView;
|
||||
import org.apache.beam.sdk.values.TupleTag;
|
||||
import org.apache.beam.sdk.values.TupleTagList;
|
||||
|
||||
/**
 * A BEAM pipeline that deletes Datastore entities in bulk.
 *
 * <p>This pipeline provides an alternative to the <a
 * href="https://cloud.google.com/datastore/docs/bulk-delete">GCP builtin template</a> that performs
 * the same task. It solves the following performance and usability problems in the builtin
 * template:
 *
 * <ul>
 *   <li>When deleting all data (by using the {@code select __key__} or {@code select *} queries),
 *       the builtin template cannot parallelize the query, therefore has to query with a single
 *       worker.
 *   <li>When deleting all data, the builtin template also attempts to delete Datastore internal
 *       tables which would cause permission-denied errors, which in turn MAY cause the pipeline to
 *       abort before all data has been deleted.
 *   <li>With the builtin template, it is possible to delete multiple entity types in one pipeline
 *       ONLY if the user can come up with a single literal query that covers all of them. This is
 *       not the case with most Nomulus entity types.
 * </ul>
 *
 * <p>A user of this pipeline must specify the types of entities to delete using the {@code
 * --kindsToDelete} command line argument. To delete specific entity types, give a comma-separated
 * string of their kind names; to delete all data, give {@code "*"}.
 *
 * <p>When deleting all data, it is recommended for the user to specify the number of user entity
 * types in the Datastore using the {@code --numOfKindsHint} argument. If the default value for this
 * parameter is too low, performance will suffer.
 */
public class BulkDeletePipeline {
  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  // This tool is not for use in our critical projects.
  private static final ImmutableSet<String> FORBIDDEN_PROJECTS =
      ImmutableSet.of("domain-registry", "domain-registry-sandbox");

  private final BulkDeletePipelineOptions options;

  private final Pipeline pipeline;

  BulkDeletePipeline(BulkDeletePipelineOptions options) {
    this.options = options;
    pipeline = Pipeline.create(options);
  }

  // Builds the pipeline graph and launches it.
  // NOTE(review): pipeline.run() does not wait for completion; see main() below.
  public void run() {
    setupPipeline();
    pipeline.run();
  }

  @SuppressWarnings("deprecation") // org.apache.beam.sdk.transforms.Reshuffle
  private void setupPipeline() {
    // Refuse to run against production/sandbox projects (see FORBIDDEN_PROJECTS).
    checkState(
        !FORBIDDEN_PROJECTS.contains(options.getProject()),
        "Bulk delete is forbidden in %s",
        options.getProject());

    // Pre-allocated tags to label entities by kind. In the case of delete-all, we must use a guess.
    TupleTagList deletionTags;
    PCollection<String> kindsToDelete;

    if (options.getKindsToDelete().equals("*")) {
      // Delete-all mode: the set of kinds is discovered at runtime, so the number of tags
      // can only be estimated from --numOfKindsHint.
      deletionTags = getDeletionTags(options.getNumOfKindsHint());
      kindsToDelete =
          pipeline.apply("DiscoverEntityKinds", discoverEntityKinds(options.getProject()));
    } else {
      ImmutableList<String> kindsToDeleteParam = parseKindsToDelete(options);
      checkState(
          !kindsToDeleteParam.contains("*"),
          "The --kindsToDelete argument should not contain both '*' and other kinds.");
      deletionTags = getDeletionTags(kindsToDeleteParam.size());
      kindsToDelete = pipeline.apply("UseProvidedKinds", Create.of(kindsToDeleteParam));
    }

    // Map each kind to a tag. The "SplitByKind" stage below will group entities by kind using
    // this mapping. In practice, this has been effective at avoiding entity group contentions.
    PCollectionView<Map<String, TupleTag<Entity>>> kindToTagMapping =
        mapKindsToDeletionTags(kindsToDelete, deletionTags).apply("GetKindsToTagMap", View.asMap());

    PCollectionTuple entities =
        kindsToDelete
            .apply("GenerateQueries", ParDo.of(new GenerateQueries()))
            .apply("ReadEntities", DatastoreV1.read().withProjectId(options.getProject()))
            .apply(
                "SplitByKind",
                ParDo.of(new SplitEntities(kindToTagMapping))
                    .withSideInputs(kindToTagMapping)
                    // The "placeholder" main-output tag receives nothing; all real output
                    // goes to the per-kind tags in deletionTags.
                    .withOutputTags(getOneDeletionTag("placeholder"), deletionTags));

    for (TupleTag<?> tag : deletionTags.getAll()) {
      entities
          // Safe unchecked cast: every tag in deletionTags was created by
          // getOneDeletionTag(), which produces TupleTag<Entity>.
          .get((TupleTag<Entity>) tag)
          // Reshuffle calls GroupByKey which is one way to trigger load rebalance in the pipeline.
          // Using the deprecated "Reshuffle" for convenience given the short life of this tool.
          .apply("RebalanceLoad", Reshuffle.viaRandomKey())
          .apply(
              "DeleteEntities_" + tag.getId(),
              DatastoreIO.v1().deleteEntity().withProjectId(options.getProject()));
    }
  }

  /** Returns a key-only GQL query string for the given kind. */
  private static String toKeyOnlyQueryForKind(String kind) {
    return "select __key__ from `" + kind + "`";
  }

  /**
   * Returns a {@link TupleTag} that retains the generic type parameter and may be used in a
   * multi-output {@link ParDo} (e.g. {@link SplitEntities}).
   *
   * <p>This method is NOT needed in tests when creating tags for assertions. Simply create them
   * with {@code new TupleTag<Entity>(String)}.
   */
  @VisibleForTesting
  static TupleTag<Entity> getOneDeletionTag(String id) {
    // The trailing {} is needed to retain generic param type.
    return new TupleTag<Entity>(id) {};
  }

  /** Parses --kindsToDelete into a list of kind names (comma-separated, trimmed, empties dropped). */
  @VisibleForTesting
  static ImmutableList<String> parseKindsToDelete(BulkDeletePipelineOptions options) {
    return ImmutableList.copyOf(
        Splitter.on(",").omitEmptyStrings().trimResults().split(options.getKindsToDelete().trim()));
  }

  /**
   * Returns a list of {@code n} {@link TupleTag TupleTags} numbered from {@code 0} to {@code n-1}.
   */
  @VisibleForTesting
  static TupleTagList getDeletionTags(int n) {
    ImmutableList.Builder<TupleTag<?>> builder = new ImmutableList.Builder<>();
    for (int i = 0; i < n; i++) {
      builder.add(getOneDeletionTag(String.valueOf(i)));
    }
    return TupleTagList.of(builder.build());
  }

  /** Returns a {@link PTransform} that finds all entity kinds in Datastore. */
  @VisibleForTesting
  static PTransform<PBegin, PCollection<String>> discoverEntityKinds(String project) {
    return new PTransform<PBegin, PCollection<String>>() {
      @Override
      public PCollection<String> expand(PBegin input) {
        // Use the __kind__ table to discover entity kinds. Data in the more informational
        // __Stat_Kind__ table may be up to 48-hour stale.
        return input
            .apply(
                "LoadEntityMetaData",
                DatastoreIO.v1()
                    .read()
                    .withProjectId(project)
                    .withLiteralGqlQuery("select * from __kind__"))
            .apply(
                "GetKindNames",
                ParDo.of(
                    new DoFn<Entity, String>() {
                      @ProcessElement
                      public void processElement(
                          @Element Entity entity, OutputReceiver<String> out) {
                        String kind = entity.getKey().getPath(0).getName();
                        // Kinds starting with "_" are Datastore-internal; deleting them
                        // would fail with permission errors, so skip them.
                        if (kind.startsWith("_")) {
                          return;
                        }
                        out.output(kind);
                      }
                    }));
      }
    };
  }

  /** Pairs each kind name with one of the pre-allocated deletion tags. */
  @VisibleForTesting
  static PCollection<KV<String, TupleTag<Entity>>> mapKindsToDeletionTags(
      PCollection<String> kinds, TupleTagList tags) {
    // The first two stages send all strings in the 'kinds' PCollection to one worker which
    // performs the mapping in the last stage.
    return kinds
        .apply(
            "AssignSingletonKeyToKinds",
            MapElements.into(kvs(strings(), strings())).via(kind -> KV.of("", kind)))
        .apply("GatherKindsIntoCollection", GroupByKey.create())
        .apply("MapKindsToTag", ParDo.of(new MapKindsToTags(tags)));
  }

  /** Transforms each {@code kind} string into a Datastore query for that kind. */
  @VisibleForTesting
  static class GenerateQueries extends DoFn<String, String> {
    @ProcessElement
    public void processElement(@Element String kind, OutputReceiver<String> out) {
      out.output(toKeyOnlyQueryForKind(kind));
    }
  }

  /**
   * {@link DoFn} that assigns each kind a deletion tag; runs on the single element produced by
   * the GroupByKey in {@link #mapKindsToDeletionTags}.
   */
  private static class MapKindsToTags
      extends DoFn<KV<String, Iterable<String>>, KV<String, TupleTag<Entity>>> {
    private final TupleTagList tupleTags;

    MapKindsToTags(TupleTagList tupleTags) {
      this.tupleTags = tupleTags;
    }

    @ProcessElement
    public void processElement(
        @Element KV<String, Iterable<String>> kv,
        OutputReceiver<KV<String, TupleTag<Entity>>> out) {
      // Sort kinds so that mapping is deterministic.
      ImmutableSortedSet<String> sortedKinds = ImmutableSortedSet.copyOf(kv.getValue());
      Iterator<String> kinds = sortedKinds.iterator();
      Iterator<TupleTag<?>> tags = tupleTags.getAll().iterator();

      while (kinds.hasNext() && tags.hasNext()) {
        out.output(KV.of(kinds.next(), (TupleTag<Entity>) tags.next()));
      }

      if (kinds.hasNext()) {
        // More kinds than tags: some tags will serve multiple kinds (round robin below),
        // reducing parallelism.
        logger.atWarning().log(
            "There are more kinds to delete (%s) than our estimate (%s). "
                + "Performance may suffer.",
            sortedKinds.size(), tupleTags.size());
      }
      // Round robin assignment so that mapping is deterministic
      while (kinds.hasNext()) {
        tags = tupleTags.getAll().iterator();
        while (kinds.hasNext() && tags.hasNext()) {
          out.output(KV.of(kinds.next(), (TupleTag<Entity>) tags.next()));
        }
      }
    }
  }

  /**
   * {@link DoFn} that splits one {@link PCollection} of mixed kinds into multiple single-kind
   * {@code PCollections}.
   */
  @VisibleForTesting
  static class SplitEntities extends DoFn<Entity, Entity> {
    private final PCollectionView<Map<String, TupleTag<Entity>>> kindToTagMapping;

    SplitEntities(PCollectionView<Map<String, TupleTag<Entity>>> kindToTagMapping) {
      super();
      this.kindToTagMapping = kindToTagMapping;
    }

    @ProcessElement
    public void processElement(ProcessContext context) {
      Entity entity = context.element();
      com.google.datastore.v1.Key entityKey = entity.getKey();
      // The last path element holds the entity's own kind (earlier elements are ancestors).
      String kind = entityKey.getPath(entityKey.getPathCount() - 1).getKind();
      TupleTag<Entity> tag = context.sideInput(kindToTagMapping).get(kind);
      context.output(tag, entity);
    }
  }

  public static void main(String[] args) {
    BulkDeletePipelineOptions options =
        PipelineOptionsFactory.fromArgs(args).withValidation().as(BulkDeletePipelineOptions.class);
    BulkDeletePipeline pipeline = new BulkDeletePipeline(options);
    pipeline.run();
    // NOTE(review): run() does not call waitUntilFinish(), so with a blocking=false runner this
    // exits before the job completes — presumably intended for Dataflow's async submission;
    // confirm before using with a direct/local runner.
    System.exit(0);
  }

  /** Command-line options for the bulk delete pipeline. */
  public interface BulkDeletePipelineOptions extends GcpOptions {

    @Description(
        "The Datastore KINDs to be deleted. The format may be:\n"
            + "\t- The list of kinds to be deleted as a comma-separated string, or\n"
            + "\t- '*', which causes all kinds to be deleted.")
    @Validation.Required
    String getKindsToDelete();

    void setKindsToDelete(String kinds);

    @Description(
        "An estimate of the number of KINDs to be deleted. "
            + "This is recommended if --kindsToDelete is '*' and the default value is too low.")
    @Default.Integer(30)
    int getNumOfKindsHint();

    void setNumOfKindsHint(int numOfKindsHint);
  }
}
|
||||
@@ -0,0 +1,765 @@
|
||||
// Copyright 2020 The Nomulus Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This class is adapted from the Apache BEAM SDK. The original license may
|
||||
// be found at <a href="https://github.com/apache/beam/blob/master/LICENSE">
|
||||
// this link</a>.
|
||||
|
||||
package google.registry.beam.datastore;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkArgument;
|
||||
import static com.google.common.base.Verify.verify;
|
||||
import static com.google.datastore.v1.PropertyFilter.Operator.EQUAL;
|
||||
import static com.google.datastore.v1.PropertyOrder.Direction.DESCENDING;
|
||||
import static com.google.datastore.v1.QueryResultBatch.MoreResultsType.NOT_FINISHED;
|
||||
import static com.google.datastore.v1.client.DatastoreHelper.makeAndFilter;
|
||||
import static com.google.datastore.v1.client.DatastoreHelper.makeFilter;
|
||||
import static com.google.datastore.v1.client.DatastoreHelper.makeOrder;
|
||||
import static com.google.datastore.v1.client.DatastoreHelper.makeValue;
|
||||
|
||||
import com.google.api.client.http.HttpRequestInitializer;
|
||||
import com.google.auth.Credentials;
|
||||
import com.google.auth.http.HttpCredentialsAdapter;
|
||||
import com.google.auto.value.AutoValue;
|
||||
import com.google.cloud.hadoop.util.ChainingHttpRequestInitializer;
|
||||
import com.google.common.base.Strings;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.flogger.FluentLogger;
|
||||
import com.google.datastore.v1.Entity;
|
||||
import com.google.datastore.v1.EntityResult;
|
||||
import com.google.datastore.v1.GqlQuery;
|
||||
import com.google.datastore.v1.PartitionId;
|
||||
import com.google.datastore.v1.Query;
|
||||
import com.google.datastore.v1.QueryResultBatch;
|
||||
import com.google.datastore.v1.RunQueryRequest;
|
||||
import com.google.datastore.v1.RunQueryResponse;
|
||||
import com.google.datastore.v1.client.Datastore;
|
||||
import com.google.datastore.v1.client.DatastoreException;
|
||||
import com.google.datastore.v1.client.DatastoreFactory;
|
||||
import com.google.datastore.v1.client.DatastoreHelper;
|
||||
import com.google.datastore.v1.client.DatastoreOptions;
|
||||
import com.google.datastore.v1.client.QuerySplitter;
|
||||
import com.google.protobuf.Int32Value;
|
||||
import com.google.rpc.Code;
|
||||
import java.io.Serializable;
|
||||
import java.util.List;
|
||||
import java.util.NoSuchElementException;
|
||||
import javax.annotation.Nullable;
|
||||
import org.apache.beam.sdk.extensions.gcp.options.GcpOptions;
|
||||
import org.apache.beam.sdk.extensions.gcp.util.RetryHttpRequestInitializer;
|
||||
import org.apache.beam.sdk.metrics.Counter;
|
||||
import org.apache.beam.sdk.metrics.Metrics;
|
||||
import org.apache.beam.sdk.options.PipelineOptions;
|
||||
import org.apache.beam.sdk.transforms.DoFn;
|
||||
import org.apache.beam.sdk.transforms.PTransform;
|
||||
import org.apache.beam.sdk.transforms.ParDo;
|
||||
import org.apache.beam.sdk.transforms.Reshuffle;
|
||||
import org.apache.beam.sdk.transforms.display.DisplayData;
|
||||
import org.apache.beam.sdk.transforms.display.HasDisplayData;
|
||||
import org.apache.beam.sdk.util.BackOff;
|
||||
import org.apache.beam.sdk.util.BackOffUtils;
|
||||
import org.apache.beam.sdk.util.FluentBackoff;
|
||||
import org.apache.beam.sdk.util.Sleeper;
|
||||
import org.apache.beam.sdk.values.KV;
|
||||
import org.apache.beam.sdk.values.PCollection;
|
||||
import org.joda.time.Duration;
|
||||
|
||||
/**
|
||||
* Contains an adaptation of {@link org.apache.beam.sdk.io.gcp.datastore.DatastoreV1.Read}. See
|
||||
* {@link MultiRead} for details.
|
||||
*/
|
||||
public class DatastoreV1 {
|
||||
|
||||
// A package-private constructor to prevent direct instantiation from outside of this package
|
||||
DatastoreV1() {}
|
||||
|
||||
/**
|
||||
* Non-retryable errors. See https://cloud.google.com/datastore/docs/concepts/errors#Error_Codes .
|
||||
*/
|
||||
private static final ImmutableSet<Code> NON_RETRYABLE_ERRORS =
|
||||
ImmutableSet.of(
|
||||
Code.FAILED_PRECONDITION,
|
||||
Code.INVALID_ARGUMENT,
|
||||
Code.PERMISSION_DENIED,
|
||||
Code.UNAUTHENTICATED);
|
||||
|
||||
/**
 * Returns an empty {@link MultiRead} builder. Configure the source {@code projectId}, {@code
 * query}, and optionally {@code namespace} and {@code numQuerySplits} using {@link
 * MultiRead#withProjectId}, {@link MultiRead#withNamespace}, {@link
 * MultiRead#withNumQuerySplits}.
 */
public static MultiRead read() {
  // numQuerySplits starts at 0 — presumably the sentinel for "auto-estimate"
  // (see getEstimatedNumSplits); TODO confirm against MultiRead#expand.
  return new AutoValue_DatastoreV1_MultiRead.Builder().setNumQuerySplits(0).build();
}
|
||||
|
||||
/**
|
||||
* A {@link PTransform} that executes every Cloud SQL queries in a {@link PCollection } and reads
|
||||
* their result rows as {@code Entity} objects.
|
||||
*
|
||||
* <p>This class is adapted from {@link org.apache.beam.sdk.io.gcp.datastore.DatastoreV1.Read}. It
|
||||
* uses literal GQL queries in the input {@link PCollection} instead of a constant query provided
|
||||
* to the builder. Only the {@link #expand} method is modified from the original. Everything else
|
||||
* including comments have been copied verbatim.
|
||||
*/
|
||||
@AutoValue
|
||||
public abstract static class MultiRead
|
||||
extends PTransform<PCollection<String>, PCollection<Entity>> {
|
||||
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
|
||||
|
||||
/** An upper bound on the number of splits for a query. */
|
||||
public static final int NUM_QUERY_SPLITS_MAX = 50000;
|
||||
|
||||
/** A lower bound on the number of splits for a query. */
|
||||
static final int NUM_QUERY_SPLITS_MIN = 12;
|
||||
|
||||
/** Default bundle size of 64MB. */
|
||||
static final long DEFAULT_BUNDLE_SIZE_BYTES = 64L * 1024L * 1024L;
|
||||
|
||||
/**
|
||||
* Maximum number of results to request per query.
|
||||
*
|
||||
* <p>Must be set, or it may result in an I/O error when querying Cloud Datastore.
|
||||
*/
|
||||
static final int QUERY_BATCH_LIMIT = 500;
|
||||
|
||||
public abstract @Nullable String getProjectId();
|
||||
|
||||
public abstract @Nullable String getNamespace();
|
||||
|
||||
public abstract int getNumQuerySplits();
|
||||
|
||||
public abstract @Nullable String getLocalhost();
|
||||
|
||||
@Override
|
||||
public abstract String toString();
|
||||
|
||||
abstract Builder toBuilder();
|
||||
|
||||
@AutoValue.Builder
|
||||
abstract static class Builder {
|
||||
abstract Builder setProjectId(String projectId);
|
||||
|
||||
abstract Builder setNamespace(String namespace);
|
||||
|
||||
abstract Builder setNumQuerySplits(int numQuerySplits);
|
||||
|
||||
abstract Builder setLocalhost(String localhost);
|
||||
|
||||
abstract MultiRead build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes the number of splits to be performed on the given query by querying the estimated
|
||||
* size from Cloud Datastore.
|
||||
*/
|
||||
static int getEstimatedNumSplits(Datastore datastore, Query query, @Nullable String namespace) {
|
||||
int numSplits;
|
||||
try {
|
||||
long estimatedSizeBytes = getEstimatedSizeBytes(datastore, query, namespace);
|
||||
logger.atInfo().log("Estimated size bytes for the query is: %s", estimatedSizeBytes);
|
||||
numSplits =
|
||||
(int)
|
||||
Math.min(
|
||||
NUM_QUERY_SPLITS_MAX,
|
||||
Math.round(((double) estimatedSizeBytes) / DEFAULT_BUNDLE_SIZE_BYTES));
|
||||
} catch (Exception e) {
|
||||
logger.atWarning().log("Failed the fetch estimatedSizeBytes for query: %s", query, e);
|
||||
// Fallback in case estimated size is unavailable.
|
||||
numSplits = NUM_QUERY_SPLITS_MIN;
|
||||
}
|
||||
return Math.max(numSplits, NUM_QUERY_SPLITS_MIN);
|
||||
}
|
||||
|
||||
/**
|
||||
* Cloud Datastore system tables with statistics are periodically updated. This method fetches
|
||||
* the latest timestamp (in microseconds) of statistics update using the {@code __Stat_Total__}
|
||||
* table.
|
||||
*/
|
||||
private static long queryLatestStatisticsTimestamp(
|
||||
Datastore datastore, @Nullable String namespace) throws DatastoreException {
|
||||
Query.Builder query = Query.newBuilder();
|
||||
// Note: namespace either being null or empty represents the default namespace, in which
|
||||
// case we treat it as not provided by the user.
|
||||
if (Strings.isNullOrEmpty(namespace)) {
|
||||
query.addKindBuilder().setName("__Stat_Total__");
|
||||
} else {
|
||||
query.addKindBuilder().setName("__Stat_Ns_Total__");
|
||||
}
|
||||
query.addOrder(makeOrder("timestamp", DESCENDING));
|
||||
query.setLimit(Int32Value.newBuilder().setValue(1));
|
||||
RunQueryRequest request = makeRequest(query.build(), namespace);
|
||||
|
||||
RunQueryResponse response = datastore.runQuery(request);
|
||||
QueryResultBatch batch = response.getBatch();
|
||||
if (batch.getEntityResultsCount() == 0) {
|
||||
throw new NoSuchElementException("Datastore total statistics unavailable");
|
||||
}
|
||||
Entity entity = batch.getEntityResults(0).getEntity();
|
||||
return entity.getProperties().get("timestamp").getTimestampValue().getSeconds() * 1000000;
|
||||
}
|
||||
|
||||
/** Retrieve latest table statistics for a given kind, namespace, and datastore. */
|
||||
private static Entity getLatestTableStats(
|
||||
String ourKind, @Nullable String namespace, Datastore datastore) throws DatastoreException {
|
||||
long latestTimestamp = queryLatestStatisticsTimestamp(datastore, namespace);
|
||||
logger.atInfo().log("Latest stats timestamp for kind %s is %s", ourKind, latestTimestamp);
|
||||
|
||||
Query.Builder queryBuilder = Query.newBuilder();
|
||||
if (Strings.isNullOrEmpty(namespace)) {
|
||||
queryBuilder.addKindBuilder().setName("__Stat_Kind__");
|
||||
} else {
|
||||
queryBuilder.addKindBuilder().setName("__Stat_Ns_Kind__");
|
||||
}
|
||||
|
||||
queryBuilder.setFilter(
|
||||
makeAndFilter(
|
||||
makeFilter("kind_name", EQUAL, makeValue(ourKind).build()).build(),
|
||||
makeFilter("timestamp", EQUAL, makeValue(latestTimestamp).build()).build()));
|
||||
|
||||
RunQueryRequest request = makeRequest(queryBuilder.build(), namespace);
|
||||
|
||||
long now = System.currentTimeMillis();
|
||||
RunQueryResponse response = datastore.runQuery(request);
|
||||
logger.atFine().log(
|
||||
"Query for per-kind statistics took %sms", System.currentTimeMillis() - now);
|
||||
|
||||
QueryResultBatch batch = response.getBatch();
|
||||
if (batch.getEntityResultsCount() == 0) {
|
||||
throw new NoSuchElementException(
|
||||
"Datastore statistics for kind " + ourKind + " unavailable");
|
||||
}
|
||||
return batch.getEntityResults(0).getEntity();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the estimated size of the data returned by the given query.
|
||||
*
|
||||
* <p>Cloud Datastore provides no way to get a good estimate of how large the result of a query
|
||||
* entity kind being queried, using the __Stat_Kind__ system table, assuming exactly 1 kind is
|
||||
* specified in the query.
|
||||
*
|
||||
* <p>See https://cloud.google.com/datastore/docs/concepts/stats.
|
||||
*/
|
||||
static long getEstimatedSizeBytes(Datastore datastore, Query query, @Nullable String namespace)
|
||||
throws DatastoreException {
|
||||
String ourKind = query.getKind(0).getName();
|
||||
Entity entity = getLatestTableStats(ourKind, namespace, datastore);
|
||||
return entity.getProperties().get("entity_bytes").getIntegerValue();
|
||||
}
|
||||
|
||||
private static PartitionId.Builder forNamespace(@Nullable String namespace) {
|
||||
PartitionId.Builder partitionBuilder = PartitionId.newBuilder();
|
||||
// Namespace either being null or empty represents the default namespace.
|
||||
// Datastore Client libraries expect users to not set the namespace proto field in
|
||||
// either of these cases.
|
||||
if (!Strings.isNullOrEmpty(namespace)) {
|
||||
partitionBuilder.setNamespaceId(namespace);
|
||||
}
|
||||
return partitionBuilder;
|
||||
}
|
||||
|
||||
/** Builds a {@link RunQueryRequest} from the {@code query} and {@code namespace}. */
|
||||
static RunQueryRequest makeRequest(Query query, @Nullable String namespace) {
|
||||
return RunQueryRequest.newBuilder()
|
||||
.setQuery(query)
|
||||
.setPartitionId(forNamespace(namespace))
|
||||
.build();
|
||||
}
|
||||
|
||||
/** Builds a {@link RunQueryRequest} from the {@code GqlQuery} and {@code namespace}. */
|
||||
private static RunQueryRequest makeRequest(GqlQuery gqlQuery, @Nullable String namespace) {
|
||||
return RunQueryRequest.newBuilder()
|
||||
.setGqlQuery(gqlQuery)
|
||||
.setPartitionId(forNamespace(namespace))
|
||||
.build();
|
||||
}
|
||||
|
||||
/**
|
||||
* A helper function to get the split queries, taking into account the optional {@code
|
||||
* namespace}.
|
||||
*/
|
||||
private static List<Query> splitQuery(
|
||||
Query query,
|
||||
@Nullable String namespace,
|
||||
Datastore datastore,
|
||||
QuerySplitter querySplitter,
|
||||
int numSplits)
|
||||
throws DatastoreException {
|
||||
// If namespace is set, include it in the split request so splits are calculated accordingly.
|
||||
return querySplitter.getSplits(query, forNamespace(namespace).build(), numSplits, datastore);
|
||||
}
|
||||
|
||||
/**
|
||||
* Translates a Cloud Datastore gql query string to {@link Query}.
|
||||
*
|
||||
* <p>Currently, the only way to translate a gql query string to a Query is to run the query
|
||||
* against Cloud Datastore and extract the {@code Query} from the response. To prevent reading
|
||||
* any data, we set the {@code LIMIT} to 0 but if the gql query already has a limit set, we
|
||||
* catch the exception with {@code INVALID_ARGUMENT} error code and retry the translation
|
||||
* without the zero limit.
|
||||
*
|
||||
* <p>Note: This may result in reading actual data from Cloud Datastore but the service has a
|
||||
* cap on the number of entities returned for a single rpc request, so this should not be a
|
||||
* problem in practice.
|
||||
*/
|
||||
private static Query translateGqlQueryWithLimitCheck(
|
||||
String gql, Datastore datastore, String namespace) throws DatastoreException {
|
||||
String gqlQueryWithZeroLimit = gql + " LIMIT 0";
|
||||
try {
|
||||
Query translatedQuery = translateGqlQuery(gqlQueryWithZeroLimit, datastore, namespace);
|
||||
// Clear the limit that we set.
|
||||
return translatedQuery.toBuilder().clearLimit().build();
|
||||
} catch (DatastoreException e) {
|
||||
// Note: There is no specific error code or message to detect if the query already has a
|
||||
// limit, so we just check for INVALID_ARGUMENT and assume that that the query might have
|
||||
// a limit already set.
|
||||
if (e.getCode() == Code.INVALID_ARGUMENT) {
|
||||
logger.atWarning().log(
|
||||
"Failed to translate Gql query '%s': %s", gqlQueryWithZeroLimit, e.getMessage());
|
||||
logger.atWarning().log(
|
||||
"User query might have a limit already set, so trying without zero limit");
|
||||
// Retry without the zero limit.
|
||||
return translateGqlQuery(gql, datastore, namespace);
|
||||
} else {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Translates a gql query string to {@link Query}. */
|
||||
private static Query translateGqlQuery(String gql, Datastore datastore, String namespace)
|
||||
throws DatastoreException {
|
||||
logger.atInfo().log("Translating gql %s", gql);
|
||||
GqlQuery gqlQuery = GqlQuery.newBuilder().setQueryString(gql).setAllowLiterals(true).build();
|
||||
RunQueryRequest req = makeRequest(gqlQuery, namespace);
|
||||
return datastore.runQuery(req).getQuery();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new {@link MultiRead} that reads from the Cloud Datastore for the specified
|
||||
* project.
|
||||
*/
|
||||
public MultiRead withProjectId(String projectId) {
|
||||
checkArgument(projectId != null, "projectId can not be null");
|
||||
return toBuilder().setProjectId(projectId).build();
|
||||
}
|
||||
|
||||
/** Returns a new {@link MultiRead} that reads from the given namespace. */
|
||||
public MultiRead withNamespace(String namespace) {
|
||||
return toBuilder().setNamespace(namespace).build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new {@link MultiRead} that reads by splitting the given {@code query} into {@code
|
||||
* numQuerySplits}.
|
||||
*
|
||||
* <p>The semantics for the query splitting is defined below:
|
||||
*
|
||||
* <ul>
|
||||
* <li>Any value less than or equal to 0 will be ignored, and the number of splits will be
|
||||
* chosen dynamically at runtime based on the query data size.
|
||||
* <li>Any value greater than {@link MultiRead#NUM_QUERY_SPLITS_MAX} will be capped at {@code
|
||||
* NUM_QUERY_SPLITS_MAX}.
|
||||
* <li>If the {@code query} has a user limit set, then {@code numQuerySplits} will be ignored
|
||||
* and no split will be performed.
|
||||
* <li>Under certain cases Cloud Datastore is unable to split query to the requested number of
|
||||
* splits. In such cases we just use whatever the Cloud Datastore returns.
|
||||
* </ul>
|
||||
*/
|
||||
public MultiRead withNumQuerySplits(int numQuerySplits) {
|
||||
return toBuilder()
|
||||
.setNumQuerySplits(Math.min(Math.max(numQuerySplits, 0), NUM_QUERY_SPLITS_MAX))
|
||||
.build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new {@link MultiRead} that reads from a Datastore Emulator running at the given
|
||||
* localhost address.
|
||||
*/
|
||||
public MultiRead withLocalhost(String localhost) {
|
||||
return toBuilder().setLocalhost(localhost).build();
|
||||
}
|
||||
|
||||
/** Returns Number of entities available for reading. */
|
||||
public long getNumEntities(
|
||||
PipelineOptions options, String ourKind, @Nullable String namespace) {
|
||||
try {
|
||||
V1Options v1Options = V1Options.from(getProjectId(), getNamespace(), getLocalhost());
|
||||
V1DatastoreFactory datastoreFactory = new V1DatastoreFactory();
|
||||
Datastore datastore =
|
||||
datastoreFactory.getDatastore(
|
||||
options, v1Options.getProjectId(), v1Options.getLocalhost());
|
||||
|
||||
Entity entity = getLatestTableStats(ourKind, namespace, datastore);
|
||||
return entity.getProperties().get("count").getIntegerValue();
|
||||
} catch (Exception e) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public PCollection<Entity> expand(PCollection<String> gqlQueries) {
|
||||
checkArgument(getProjectId() != null, "projectId cannot be null");
|
||||
|
||||
V1Options v1Options = V1Options.from(getProjectId(), getNamespace(), getLocalhost());
|
||||
|
||||
/*
|
||||
* This composite transform involves the following steps:
|
||||
* 1. Apply a {@link ParDo} that translates each query in {@code gqlQueries} into a {@code
|
||||
* query}.
|
||||
*
|
||||
* 2. A {@link ParDo} splits the resulting query into {@code numQuerySplits} and
|
||||
* assign each split query a unique {@code Integer} as the key. The resulting output is
|
||||
* of the type {@code PCollection<KV<Integer, Query>>}.
|
||||
*
|
||||
* If the value of {@code numQuerySplits} is less than or equal to 0, then the number of
|
||||
* splits will be computed dynamically based on the size of the data for the {@code query}.
|
||||
*
|
||||
* 3. The resulting {@code PCollection} is sharded using a {@link GroupByKey} operation. The
|
||||
* queries are extracted from they {@code KV<Integer, Iterable<Query>>} and flattened to
|
||||
* output a {@code PCollection<Query>}.
|
||||
*
|
||||
* 4. In the third step, a {@code ParDo} reads entities for each query and outputs
|
||||
* a {@code PCollection<Entity>}.
|
||||
*/
|
||||
|
||||
PCollection<Query> inputQuery =
|
||||
gqlQueries.apply(ParDo.of(new GqlQueryTranslateFn(v1Options)));
|
||||
|
||||
return inputQuery
|
||||
.apply("Split", ParDo.of(new SplitQueryFn(v1Options, getNumQuerySplits())))
|
||||
.apply("Reshuffle", Reshuffle.viaRandomKey())
|
||||
.apply("Read", ParDo.of(new ReadFn(v1Options)));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void populateDisplayData(DisplayData.Builder builder) {
|
||||
super.populateDisplayData(builder);
|
||||
builder
|
||||
.addIfNotNull(DisplayData.item("projectId", getProjectId()).withLabel("ProjectId"))
|
||||
.addIfNotNull(DisplayData.item("namespace", getNamespace()).withLabel("Namespace"));
|
||||
}
|
||||
|
||||
private static class V1Options implements HasDisplayData, Serializable {
|
||||
private final String project;
|
||||
private final @Nullable String namespace;
|
||||
private final @Nullable String localhost;
|
||||
|
||||
private V1Options(String project, @Nullable String namespace, @Nullable String localhost) {
|
||||
this.project = project;
|
||||
this.namespace = namespace;
|
||||
this.localhost = localhost;
|
||||
}
|
||||
|
||||
public static V1Options from(
|
||||
String projectId, @Nullable String namespace, @Nullable String localhost) {
|
||||
return new V1Options(projectId, namespace, localhost);
|
||||
}
|
||||
|
||||
public String getProjectId() {
|
||||
return project;
|
||||
}
|
||||
|
||||
public @Nullable String getNamespace() {
|
||||
return namespace;
|
||||
}
|
||||
|
||||
public @Nullable String getLocalhost() {
|
||||
return localhost;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void populateDisplayData(DisplayData.Builder builder) {
|
||||
builder
|
||||
.addIfNotNull(DisplayData.item("projectId", getProjectId()).withLabel("ProjectId"))
|
||||
.addIfNotNull(DisplayData.item("namespace", getNamespace()).withLabel("Namespace"));
|
||||
}
|
||||
}
|
||||
|
||||
/** A DoFn that translates a Cloud Datastore gql query string to {@code Query}. */
|
||||
static class GqlQueryTranslateFn extends DoFn<String, Query> {
|
||||
private final V1Options v1Options;
|
||||
private transient Datastore datastore;
|
||||
private final V1DatastoreFactory datastoreFactory;
|
||||
|
||||
GqlQueryTranslateFn(V1Options options) {
|
||||
this(options, new V1DatastoreFactory());
|
||||
}
|
||||
|
||||
GqlQueryTranslateFn(V1Options options, V1DatastoreFactory datastoreFactory) {
|
||||
this.v1Options = options;
|
||||
this.datastoreFactory = datastoreFactory;
|
||||
}
|
||||
|
||||
@StartBundle
|
||||
public void startBundle(StartBundleContext c) throws Exception {
|
||||
datastore =
|
||||
datastoreFactory.getDatastore(
|
||||
c.getPipelineOptions(), v1Options.getProjectId(), v1Options.getLocalhost());
|
||||
}
|
||||
|
||||
@ProcessElement
|
||||
public void processElement(ProcessContext c) throws Exception {
|
||||
String gqlQuery = c.element();
|
||||
logger.atInfo().log("User query: '%s'", gqlQuery);
|
||||
Query query =
|
||||
translateGqlQueryWithLimitCheck(gqlQuery, datastore, v1Options.getNamespace());
|
||||
logger.atInfo().log("User gql query translated to Query(%s)", query);
|
||||
c.output(query);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A {@link DoFn} that splits a given query into multiple sub-queries, assigns them unique keys
|
||||
* and outputs them as {@link KV}.
|
||||
*/
|
||||
private static class SplitQueryFn extends DoFn<Query, Query> {
|
||||
private final V1Options options;
|
||||
// number of splits to make for a given query
|
||||
private final int numSplits;
|
||||
|
||||
private final V1DatastoreFactory datastoreFactory;
|
||||
// Datastore client
|
||||
private transient Datastore datastore;
|
||||
// Query splitter
|
||||
private transient QuerySplitter querySplitter;
|
||||
|
||||
public SplitQueryFn(V1Options options, int numSplits) {
|
||||
this(options, numSplits, new V1DatastoreFactory());
|
||||
}
|
||||
|
||||
private SplitQueryFn(V1Options options, int numSplits, V1DatastoreFactory datastoreFactory) {
|
||||
this.options = options;
|
||||
this.numSplits = numSplits;
|
||||
this.datastoreFactory = datastoreFactory;
|
||||
}
|
||||
|
||||
@StartBundle
|
||||
public void startBundle(StartBundleContext c) throws Exception {
|
||||
datastore =
|
||||
datastoreFactory.getDatastore(
|
||||
c.getPipelineOptions(), options.getProjectId(), options.getLocalhost());
|
||||
querySplitter = datastoreFactory.getQuerySplitter();
|
||||
}
|
||||
|
||||
@ProcessElement
|
||||
public void processElement(ProcessContext c) throws Exception {
|
||||
Query query = c.element();
|
||||
|
||||
// If query has a user set limit, then do not split.
|
||||
if (query.hasLimit()) {
|
||||
c.output(query);
|
||||
return;
|
||||
}
|
||||
|
||||
int estimatedNumSplits;
|
||||
// Compute the estimated numSplits if numSplits is not specified by the user.
|
||||
if (numSplits <= 0) {
|
||||
estimatedNumSplits = getEstimatedNumSplits(datastore, query, options.getNamespace());
|
||||
} else {
|
||||
estimatedNumSplits = numSplits;
|
||||
}
|
||||
|
||||
logger.atInfo().log("Splitting the query into %s splits", estimatedNumSplits);
|
||||
List<Query> querySplits;
|
||||
try {
|
||||
querySplits =
|
||||
splitQuery(
|
||||
query, options.getNamespace(), datastore, querySplitter, estimatedNumSplits);
|
||||
} catch (Exception e) {
|
||||
logger.atWarning().log("Unable to parallelize the given query: %s", query, e);
|
||||
querySplits = ImmutableList.of(query);
|
||||
}
|
||||
|
||||
// assign unique keys to query splits.
|
||||
for (Query subquery : querySplits) {
|
||||
c.output(subquery);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void populateDisplayData(DisplayData.Builder builder) {
|
||||
super.populateDisplayData(builder);
|
||||
builder.include("options", options);
|
||||
if (numSplits > 0) {
|
||||
builder.add(
|
||||
DisplayData.item("numQuerySplits", numSplits)
|
||||
.withLabel("Requested number of Query splits"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** A {@link DoFn} that reads entities from Cloud Datastore for each query. */
|
||||
private static class ReadFn extends DoFn<Query, Entity> {
|
||||
private final V1Options options;
|
||||
private final V1DatastoreFactory datastoreFactory;
|
||||
// Datastore client
|
||||
private transient Datastore datastore;
|
||||
private final Counter rpcErrors = Metrics.counter(ReadFn.class, "datastoreRpcErrors");
|
||||
private final Counter rpcSuccesses = Metrics.counter(ReadFn.class, "datastoreRpcSuccesses");
|
||||
private static final int MAX_RETRIES = 5;
|
||||
private static final FluentBackoff RUNQUERY_BACKOFF =
|
||||
FluentBackoff.DEFAULT
|
||||
.withMaxRetries(MAX_RETRIES)
|
||||
.withInitialBackoff(Duration.standardSeconds(5));
|
||||
|
||||
public ReadFn(V1Options options) {
|
||||
this(options, new V1DatastoreFactory());
|
||||
}
|
||||
|
||||
private ReadFn(V1Options options, V1DatastoreFactory datastoreFactory) {
|
||||
this.options = options;
|
||||
this.datastoreFactory = datastoreFactory;
|
||||
}
|
||||
|
||||
@StartBundle
|
||||
public void startBundle(StartBundleContext c) throws Exception {
|
||||
datastore =
|
||||
datastoreFactory.getDatastore(
|
||||
c.getPipelineOptions(), options.getProjectId(), options.getLocalhost());
|
||||
}
|
||||
|
||||
private RunQueryResponse runQueryWithRetries(RunQueryRequest request) throws Exception {
|
||||
Sleeper sleeper = Sleeper.DEFAULT;
|
||||
BackOff backoff = RUNQUERY_BACKOFF.backoff();
|
||||
while (true) {
|
||||
try {
|
||||
RunQueryResponse response = datastore.runQuery(request);
|
||||
rpcSuccesses.inc();
|
||||
return response;
|
||||
} catch (DatastoreException exception) {
|
||||
rpcErrors.inc();
|
||||
|
||||
if (NON_RETRYABLE_ERRORS.contains(exception.getCode())) {
|
||||
throw exception;
|
||||
}
|
||||
if (!BackOffUtils.next(sleeper, backoff)) {
|
||||
logger.atSevere().log("Aborting after %s retries.", MAX_RETRIES);
|
||||
throw exception;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Read and output entities for the given query. */
|
||||
@ProcessElement
|
||||
public void processElement(ProcessContext context) throws Exception {
|
||||
Query query = context.element();
|
||||
String namespace = options.getNamespace();
|
||||
int userLimit = query.hasLimit() ? query.getLimit().getValue() : Integer.MAX_VALUE;
|
||||
|
||||
boolean moreResults = true;
|
||||
QueryResultBatch currentBatch = null;
|
||||
|
||||
while (moreResults) {
|
||||
Query.Builder queryBuilder = query.toBuilder();
|
||||
queryBuilder.setLimit(
|
||||
Int32Value.newBuilder().setValue(Math.min(userLimit, QUERY_BATCH_LIMIT)));
|
||||
|
||||
if (currentBatch != null && !currentBatch.getEndCursor().isEmpty()) {
|
||||
queryBuilder.setStartCursor(currentBatch.getEndCursor());
|
||||
}
|
||||
|
||||
RunQueryRequest request = makeRequest(queryBuilder.build(), namespace);
|
||||
RunQueryResponse response = runQueryWithRetries(request);
|
||||
|
||||
currentBatch = response.getBatch();
|
||||
|
||||
// MORE_RESULTS_AFTER_LIMIT is not implemented yet:
|
||||
// https://groups.google.com/forum/#!topic/gcd-discuss/iNs6M1jA2Vw, so
|
||||
// use result count to determine if more results might exist.
|
||||
int numFetch = currentBatch.getEntityResultsCount();
|
||||
if (query.hasLimit()) {
|
||||
verify(
|
||||
userLimit >= numFetch,
|
||||
"Expected userLimit %s >= numFetch %s, because query limit %s must be <= userLimit",
|
||||
userLimit,
|
||||
numFetch,
|
||||
query.getLimit());
|
||||
userLimit -= numFetch;
|
||||
}
|
||||
|
||||
// output all the entities from the current batch.
|
||||
for (EntityResult entityResult : currentBatch.getEntityResultsList()) {
|
||||
context.output(entityResult.getEntity());
|
||||
}
|
||||
|
||||
// Check if we have more entities to be read.
|
||||
moreResults =
|
||||
// User-limit does not exist (so userLimit == MAX_VALUE) and/or has not been satisfied
|
||||
(userLimit > 0)
|
||||
// All indications from the API are that there are/may be more results.
|
||||
&& ((numFetch == QUERY_BATCH_LIMIT)
|
||||
|| (currentBatch.getMoreResults() == NOT_FINISHED));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void populateDisplayData(DisplayData.Builder builder) {
|
||||
super.populateDisplayData(builder);
|
||||
builder.include("options", options);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A wrapper factory class for Cloud Datastore singleton classes {@link DatastoreFactory} and
|
||||
* {@link QuerySplitter}
|
||||
*
|
||||
* <p>{@link DatastoreFactory} and {@link QuerySplitter} are not java serializable, hence wrapping
|
||||
* them under this class, which implements {@link Serializable}.
|
||||
*/
|
||||
private static class V1DatastoreFactory implements Serializable {
|
||||
|
||||
/** Builds a Cloud Datastore client for the given pipeline options and project. */
|
||||
public Datastore getDatastore(PipelineOptions pipelineOptions, String projectId) {
|
||||
return getDatastore(pipelineOptions, projectId, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds a Cloud Datastore client for the given pipeline options, project and an optional
|
||||
* locahost.
|
||||
*/
|
||||
public Datastore getDatastore(
|
||||
PipelineOptions pipelineOptions, String projectId, @Nullable String localhost) {
|
||||
Credentials credential = pipelineOptions.as(GcpOptions.class).getGcpCredential();
|
||||
HttpRequestInitializer initializer;
|
||||
if (credential != null) {
|
||||
initializer =
|
||||
new ChainingHttpRequestInitializer(
|
||||
new HttpCredentialsAdapter(credential), new RetryHttpRequestInitializer());
|
||||
} else {
|
||||
initializer = new RetryHttpRequestInitializer();
|
||||
}
|
||||
|
||||
DatastoreOptions.Builder builder =
|
||||
new DatastoreOptions.Builder().projectId(projectId).initializer(initializer);
|
||||
|
||||
if (localhost != null) {
|
||||
builder.localHost(localhost);
|
||||
} else {
|
||||
builder.host("batch-datastore.googleapis.com");
|
||||
}
|
||||
|
||||
return DatastoreFactory.get().create(builder.build());
|
||||
}
|
||||
|
||||
/** Builds a Cloud Datastore {@link QuerySplitter}. */
|
||||
public QuerySplitter getQuerySplitter() {
|
||||
return DatastoreHelper.getQuerySplitter();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -30,7 +30,9 @@ import google.registry.keyring.kms.KmsModule;
|
||||
import google.registry.persistence.PersistenceModule;
|
||||
import google.registry.persistence.PersistenceModule.JdbcJpaTm;
|
||||
import google.registry.persistence.PersistenceModule.SocketFactoryJpaTm;
|
||||
import google.registry.persistence.PersistenceModule.TransactionIsolationLevel;
|
||||
import google.registry.persistence.transaction.JpaTransactionManager;
|
||||
import google.registry.privileges.secretmanager.SecretManagerModule;
|
||||
import google.registry.util.UtilsModule;
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
@@ -56,6 +58,7 @@ public class BeamJpaModule {
|
||||
|
||||
@Nullable private final String sqlAccessInfoFile;
|
||||
@Nullable private final String cloudKmsProjectId;
|
||||
@Nullable private final TransactionIsolationLevel isolationOverride;
|
||||
|
||||
/**
|
||||
* Constructs a new instance of {@link BeamJpaModule}.
|
||||
@@ -72,10 +75,20 @@ public class BeamJpaModule {
|
||||
* real encrypted file on GCS as returned by {@link
|
||||
* BackupPaths#getCloudSQLCredentialFilePatterns} or an unencrypted file on local filesystem
|
||||
* with credentials to a test database.
|
||||
* @param cloudKmsProjectId the GCP project where the credential decryption key can be found
|
||||
* @param isolationOverride the desired Transaction Isolation level for all JDBC connections
|
||||
*/
|
||||
public BeamJpaModule(@Nullable String sqlAccessInfoFile, @Nullable String cloudKmsProjectId) {
|
||||
public BeamJpaModule(
|
||||
@Nullable String sqlAccessInfoFile,
|
||||
@Nullable String cloudKmsProjectId,
|
||||
@Nullable TransactionIsolationLevel isolationOverride) {
|
||||
this.sqlAccessInfoFile = sqlAccessInfoFile;
|
||||
this.cloudKmsProjectId = cloudKmsProjectId;
|
||||
this.isolationOverride = isolationOverride;
|
||||
}
|
||||
|
||||
public BeamJpaModule(@Nullable String sqlAccessInfoFile, @Nullable String cloudKmsProjectId) {
|
||||
this(sqlAccessInfoFile, cloudKmsProjectId, null);
|
||||
}
|
||||
|
||||
/** Returns true if the credential file is on GCS (and therefore expected to be encrypted). */
|
||||
@@ -153,6 +166,13 @@ public class BeamJpaModule {
|
||||
return "nomulus-tool-keyring";
|
||||
}
|
||||
|
||||
@Provides
|
||||
@Config("beamIsolationOverride")
|
||||
@Nullable
|
||||
TransactionIsolationLevel providesIsolationOverride() {
|
||||
return isolationOverride;
|
||||
}
|
||||
|
||||
@Provides
|
||||
@Config("beamHibernateHikariMaximumPoolSize")
|
||||
static int getBeamHibernateHikariMaximumPoolSize() {
|
||||
@@ -168,6 +188,7 @@ public class BeamJpaModule {
|
||||
BeamJpaModule.class,
|
||||
KmsModule.class,
|
||||
PersistenceModule.class,
|
||||
SecretManagerModule.class,
|
||||
UtilsModule.class
|
||||
})
|
||||
public interface JpaTransactionManagerComponent {
|
||||
|
||||
@@ -26,17 +26,15 @@ final class DomainBaseUtil {
|
||||
private DomainBaseUtil() {}
|
||||
|
||||
/**
|
||||
* Removes {@link google.registry.model.billing.BillingEvent.Recurring}, {@link
|
||||
* google.registry.model.poll.PollMessage PollMessages} and {@link
|
||||
* google.registry.model.host.HostResource name servers} from a Datastore {@link Entity} that
|
||||
* represents an Ofy {@link google.registry.model.domain.DomainBase}. This breaks the cycle of
|
||||
* foreign key constraints between these entity kinds, allowing {@code DomainBases} to be inserted
|
||||
* into the SQL database. See {@link InitSqlPipeline} for a use case, where the full {@code
|
||||
* DomainBases} are written again during the last stage of the pipeline.
|
||||
* Removes properties that contain foreign keys from a Datastore {@link Entity} that represents an
|
||||
* Ofy {@link google.registry.model.domain.DomainBase}. This breaks the cycle of foreign key
|
||||
* constraints between entity kinds, allowing {@code DomainBases} to be inserted into the SQL
|
||||
* database. See {@link InitSqlPipeline} for a use case, where the full {@code DomainBases} are
|
||||
* written again during the last stage of the pipeline.
|
||||
*
|
||||
* <p>The returned object may be in bad state. Specifically, {@link
|
||||
* google.registry.model.eppcommon.StatusValue#INACTIVE} is not added after name servers are
|
||||
* removed. This only impacts tests.
|
||||
* removed. This only impacts tests that manipulate Datastore entities directly.
|
||||
*
|
||||
* <p>This operation is performed on an Datastore {@link Entity} instead of Ofy Java object
|
||||
* because Objectify requires access to a Datastore service when converting an Ofy object to a
|
||||
@@ -70,6 +68,9 @@ final class DomainBaseUtil {
|
||||
domainBase.getProperties().keySet().stream()
|
||||
.filter(s -> s.startsWith("transferData."))
|
||||
.forEach(s -> clone.removeProperty(s));
|
||||
domainBase.getProperties().keySet().stream()
|
||||
.filter(s -> s.startsWith("gracePeriods."))
|
||||
.forEach(s -> clone.removeProperty(s));
|
||||
return clone;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,6 +34,7 @@ import google.registry.model.registrar.Registrar;
|
||||
import google.registry.model.registrar.RegistrarContact;
|
||||
import google.registry.model.registry.Registry;
|
||||
import google.registry.model.reporting.HistoryEntry;
|
||||
import google.registry.persistence.PersistenceModule.TransactionIsolationLevel;
|
||||
import google.registry.persistence.transaction.JpaTransactionManager;
|
||||
import java.io.Serializable;
|
||||
import java.util.Collection;
|
||||
@@ -77,14 +78,22 @@ import org.joda.time.DateTime;
|
||||
* HistoryEntry}.
|
||||
* <li>{@link BillingEvent.OneTime}: references {@code Registrar}, {@code DomainBase}, {@code
|
||||
* BillingEvent.Recurring}, {@code HistoryEntry} and {@code AllocationToken}.
|
||||
* <li>{@link BillingEvent.Modification}: SQL model TBD. Will reference {@code Registrar}, {@code
|
||||
* DomainBase} and {@code BillingEvent.OneTime}.
|
||||
* <li>{@link BillingEvent.Cancellation}: references {@code Registrar}, {@code DomainBase}, {@code
|
||||
* BillingEvent.Recurring}, {@code BillingEvent.OneTime}, and {@code HistoryEntry}.
|
||||
* <li>{@link PollMessage}: references {@code Registrar}, {@code DomainBase}, {@code
|
||||
* ContactResource}, {@code HostResource}, and {@code HistoryEntry}.
|
||||
* <li>{@link DomainBase}, original copy from Datastore.
|
||||
* </ol>
|
||||
*
|
||||
* <p>This pipeline expects that the source Datastore has at least one entity in each of the types
|
||||
* above. This assumption allows us to construct a simpler pipeline graph that can be visually
|
||||
* examined, and is true in all intended use cases. However, tests must not violate this assumption
|
||||
* when setting up data, otherwise they may run into foreign key constraint violations. The reason
|
||||
* is that this pipeline uses the {@link Wait} transform to order the persistence by entity type.
|
||||
* However, the wait is skipped if the target type has no data, resulting in subsequent entity types
|
||||
* starting prematurely. E.g., if a Datastore has no {@code RegistrarContact} entities, the pipeline
|
||||
* may start writing {@code DomainBase} entities before all {@code Registry}, {@code Registrar} and
|
||||
* {@code ContactResource} entities have been persisted.
|
||||
*/
|
||||
public class InitSqlPipeline implements Serializable {
|
||||
|
||||
@@ -93,24 +102,23 @@ public class InitSqlPipeline implements Serializable {
|
||||
* DomainBase}.
|
||||
*/
|
||||
private static final ImmutableList<Class<?>> PHASE_ONE_ORDERED =
|
||||
ImmutableList.of(Registry.class, Registrar.class, ContactResource.class);
|
||||
ImmutableList.of(
|
||||
Registry.class, Registrar.class, ContactResource.class, RegistrarContact.class);
|
||||
|
||||
/**
|
||||
* Datastore kinds to be written to the SQL database after the cleansed version of {@link
|
||||
* DomainBase}.
|
||||
*
|
||||
* <p>The following entities are missing from the list:
|
||||
*
|
||||
* <ul>
|
||||
* <li>Those not modeled in JPA yet, e.g., {@code BillingEvent.Modification}.
|
||||
* <li>Those waiting for sanitation, e.g., {@code HistoryEntry}, which would have duplicate keys
|
||||
* after converting to SQL model.
|
||||
* <li>Those that have foreign key constraints on the above.
|
||||
* </ul>
|
||||
*/
|
||||
// TODO(weiminyu): add more entities when available.
|
||||
private static final ImmutableList<Class<?>> PHASE_TWO_ORDERED =
|
||||
ImmutableList.of(HostResource.class);
|
||||
ImmutableList.of(
|
||||
HostResource.class,
|
||||
HistoryEntry.class,
|
||||
AllocationToken.class,
|
||||
BillingEvent.Recurring.class,
|
||||
BillingEvent.OneTime.class,
|
||||
BillingEvent.Cancellation.class,
|
||||
PollMessage.class,
|
||||
DomainBase.class);
|
||||
|
||||
private final InitSqlPipelineOptions options;
|
||||
|
||||
@@ -226,7 +234,11 @@ public class InitSqlPipeline implements Serializable {
|
||||
transformId,
|
||||
options.getMaxConcurrentSqlWriters(),
|
||||
options.getSqlWriteBatchSize(),
|
||||
new JpaSupplierFactory(credentialFileUrl, options.getCloudKmsProjectId(), jpaGetter)));
|
||||
new JpaSupplierFactory(
|
||||
credentialFileUrl,
|
||||
options.getCloudKmsProjectId(),
|
||||
jpaGetter,
|
||||
TransactionIsolationLevel.TRANSACTION_READ_UNCOMMITTED)));
|
||||
}
|
||||
|
||||
private static ImmutableList<String> toKindStrings(Collection<Class<?>> entityClasses) {
|
||||
|
||||
@@ -16,6 +16,7 @@ package google.registry.beam.initsql;
|
||||
|
||||
import google.registry.beam.initsql.BeamJpaModule.JpaTransactionManagerComponent;
|
||||
import google.registry.beam.initsql.Transforms.SerializableSupplier;
|
||||
import google.registry.persistence.PersistenceModule.TransactionIsolationLevel;
|
||||
import google.registry.persistence.transaction.JpaTransactionManager;
|
||||
import javax.annotation.Nullable;
|
||||
import org.apache.beam.sdk.transforms.SerializableFunction;
|
||||
@@ -28,21 +29,32 @@ public class JpaSupplierFactory implements SerializableSupplier<JpaTransactionMa
|
||||
@Nullable private final String cloudKmsProjectId;
|
||||
private final SerializableFunction<JpaTransactionManagerComponent, JpaTransactionManager>
|
||||
jpaGetter;
|
||||
@Nullable private final TransactionIsolationLevel isolationLevelOverride;
|
||||
|
||||
public JpaSupplierFactory(
|
||||
String credentialFileUrl,
|
||||
@Nullable String cloudKmsProjectId,
|
||||
SerializableFunction<JpaTransactionManagerComponent, JpaTransactionManager> jpaGetter) {
|
||||
this(credentialFileUrl, cloudKmsProjectId, jpaGetter, null);
|
||||
}
|
||||
|
||||
public JpaSupplierFactory(
|
||||
String credentialFileUrl,
|
||||
@Nullable String cloudKmsProjectId,
|
||||
SerializableFunction<JpaTransactionManagerComponent, JpaTransactionManager> jpaGetter,
|
||||
@Nullable TransactionIsolationLevel isolationLevelOverride) {
|
||||
this.credentialFileUrl = credentialFileUrl;
|
||||
this.cloudKmsProjectId = cloudKmsProjectId;
|
||||
this.jpaGetter = jpaGetter;
|
||||
this.isolationLevelOverride = isolationLevelOverride;
|
||||
}
|
||||
|
||||
@Override
|
||||
public JpaTransactionManager get() {
|
||||
return jpaGetter.apply(
|
||||
DaggerBeamJpaModule_JpaTransactionManagerComponent.builder()
|
||||
.beamJpaModule(new BeamJpaModule(credentialFileUrl, cloudKmsProjectId))
|
||||
.beamJpaModule(
|
||||
new BeamJpaModule(credentialFileUrl, cloudKmsProjectId, isolationLevelOverride))
|
||||
.build());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,11 +17,9 @@ package google.registry.beam.initsql;
|
||||
import static com.google.common.base.Preconditions.checkArgument;
|
||||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
import static com.google.common.base.Preconditions.checkState;
|
||||
import static com.google.common.base.Throwables.throwIfUnchecked;
|
||||
import static google.registry.beam.initsql.BackupPaths.getCommitLogTimestamp;
|
||||
import static google.registry.beam.initsql.BackupPaths.getExportFilePatterns;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.JpaRetries.isFailedTxnRetriable;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.setJpaTm;
|
||||
import static google.registry.util.DateTimeUtils.START_OF_TIME;
|
||||
@@ -38,28 +36,34 @@ import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.Streams;
|
||||
import com.googlecode.objectify.Key;
|
||||
import google.registry.backup.AppEngineEnvironment;
|
||||
import google.registry.backup.CommitLogImports;
|
||||
import google.registry.backup.VersionedEntity;
|
||||
import google.registry.model.domain.DomainBase;
|
||||
import google.registry.model.ofy.ObjectifyService;
|
||||
import google.registry.model.ofy.Ofy;
|
||||
import google.registry.model.reporting.HistoryEntry;
|
||||
import google.registry.persistence.transaction.JpaTransactionManager;
|
||||
import google.registry.schema.replay.DatastoreAndSqlEntity;
|
||||
import google.registry.schema.replay.SqlEntity;
|
||||
import google.registry.tools.LevelDbLogReader;
|
||||
import google.registry.util.SystemSleeper;
|
||||
import java.io.Serializable;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.function.Supplier;
|
||||
import javax.annotation.Nullable;
|
||||
import org.apache.beam.sdk.coders.StringUtf8Coder;
|
||||
import org.apache.beam.sdk.io.Compression;
|
||||
import org.apache.beam.sdk.io.FileIO;
|
||||
import org.apache.beam.sdk.io.FileIO.ReadableFile;
|
||||
import org.apache.beam.sdk.io.fs.EmptyMatchTreatment;
|
||||
import org.apache.beam.sdk.io.fs.MatchResult.Metadata;
|
||||
import org.apache.beam.sdk.metrics.Counter;
|
||||
import org.apache.beam.sdk.metrics.Metrics;
|
||||
import org.apache.beam.sdk.transforms.Create;
|
||||
import org.apache.beam.sdk.transforms.DoFn;
|
||||
import org.apache.beam.sdk.transforms.Flatten;
|
||||
@@ -79,7 +83,6 @@ import org.apache.beam.sdk.values.TupleTag;
|
||||
import org.apache.beam.sdk.values.TupleTagList;
|
||||
import org.apache.beam.sdk.values.TypeDescriptor;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.Duration;
|
||||
|
||||
/**
|
||||
* {@link PTransform Pipeline transforms} used in pipelines that load from both Datastore export
|
||||
@@ -289,7 +292,7 @@ public final class Transforms {
|
||||
maxWriters,
|
||||
batchSize,
|
||||
jpaSupplier,
|
||||
(e) -> ofy().toPojo(e.getEntity().get()),
|
||||
Transforms::convertVersionedEntityToSqlEntity,
|
||||
TypeDescriptor.of(VersionedEntity.class));
|
||||
}
|
||||
|
||||
@@ -330,11 +333,50 @@ public final class Transforms {
|
||||
.apply("Batch output by shard " + transformId, GroupIntoBatches.ofSize(batchSize))
|
||||
.apply(
|
||||
"Write in batch for " + transformId,
|
||||
ParDo.of(new SqlBatchWriter<T>(jpaSupplier, jpaConverter)));
|
||||
ParDo.of(new SqlBatchWriter<T>(transformId, jpaSupplier, jpaConverter)));
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private static Key toOfyKey(Object ofyEntity) {
|
||||
return Key.create(ofyEntity);
|
||||
}
|
||||
|
||||
private static boolean isMigratable(Entity entity) {
|
||||
if (entity.getKind().equals("HistoryEntry")) {
|
||||
// DOMAIN_APPLICATION_CREATE is deprecated type and should not be migrated.
|
||||
// The Enum name DOMAIN_APPLICATION_CREATE no longer exists in Java and cannot
|
||||
// be deserialized.
|
||||
return !Objects.equals(entity.getProperty("type"), "DOMAIN_APPLICATION_CREATE");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private static SqlEntity toSqlEntity(Object ofyEntity) {
|
||||
if (ofyEntity instanceof HistoryEntry) {
|
||||
HistoryEntry ofyHistory = (HistoryEntry) ofyEntity;
|
||||
return (SqlEntity) ofyHistory.toChildHistoryEntity();
|
||||
}
|
||||
return ((DatastoreAndSqlEntity) ofyEntity).toSqlEntity().get();
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a {@link VersionedEntity} to an JPA entity for persistence.
|
||||
*
|
||||
* @return An object to be persisted to SQL, or null if the input is not to be migrated. (Not
|
||||
* using Optional in return because as a one-use method, we do not want to invest the effort
|
||||
* to make Optional work with BEAM)
|
||||
*/
|
||||
@Nullable
|
||||
private static Object convertVersionedEntityToSqlEntity(VersionedEntity dsEntity) {
|
||||
return dsEntity
|
||||
.getEntity()
|
||||
.filter(Transforms::isMigratable)
|
||||
.map(e -> ofy().toPojo(e))
|
||||
.map(Transforms::toSqlEntity)
|
||||
.orElse(null);
|
||||
}
|
||||
|
||||
/** Interface for serializable {@link Supplier suppliers}. */
|
||||
public interface SerializableSupplier<T> extends Supplier<T>, Serializable {}
|
||||
|
||||
@@ -429,26 +471,24 @@ public final class Transforms {
|
||||
private static int instanceCount = 0;
|
||||
private static JpaTransactionManager originalJpa;
|
||||
|
||||
private Counter counter;
|
||||
|
||||
private final SerializableSupplier<JpaTransactionManager> jpaSupplier;
|
||||
private final SerializableFunction<T, Object> jpaConverter;
|
||||
|
||||
private transient Ofy ofy;
|
||||
private transient SystemSleeper sleeper;
|
||||
|
||||
SqlBatchWriter(
|
||||
String type,
|
||||
SerializableSupplier<JpaTransactionManager> jpaSupplier,
|
||||
SerializableFunction<T, Object> jpaConverter) {
|
||||
counter = Metrics.counter("SQL_WRITE", type);
|
||||
this.jpaSupplier = jpaSupplier;
|
||||
this.jpaConverter = jpaConverter;
|
||||
}
|
||||
|
||||
@Setup
|
||||
public void setup() {
|
||||
sleeper = new SystemSleeper();
|
||||
|
||||
try (AppEngineEnvironment env = new AppEngineEnvironment()) {
|
||||
ObjectifyService.initOfy();
|
||||
ofy = ObjectifyService.ofy();
|
||||
}
|
||||
|
||||
synchronized (SqlBatchWriter.class) {
|
||||
@@ -477,31 +517,29 @@ public final class Transforms {
|
||||
ImmutableList<Object> ofyEntities =
|
||||
Streams.stream(kv.getValue())
|
||||
.map(this.jpaConverter::apply)
|
||||
// TODO(b/177340730): post migration delete the line below.
|
||||
.filter(Objects::nonNull)
|
||||
.collect(ImmutableList.toImmutableList());
|
||||
retry(() -> jpaTm().transact(() -> jpaTm().putAll(ofyEntities)));
|
||||
try {
|
||||
jpaTm().transact(() -> jpaTm().putAll(ofyEntities));
|
||||
counter.inc(ofyEntities.size());
|
||||
} catch (RuntimeException e) {
|
||||
processSingly(ofyEntities);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(b/160632289): Enhance Retrier and use it here.
|
||||
private void retry(Runnable runnable) {
|
||||
int maxAttempts = 5;
|
||||
int initialDelayMillis = 100;
|
||||
double jitterRatio = 0.2;
|
||||
|
||||
for (int attempt = 0; attempt < maxAttempts; attempt++) {
|
||||
/**
|
||||
* Writes entities in a failed batch one by one to identify the first bad entity and throws a
|
||||
* {@link RuntimeException} on it.
|
||||
*/
|
||||
private void processSingly(ImmutableList<Object> ofyEntities) {
|
||||
for (Object ofyEntity : ofyEntities) {
|
||||
try {
|
||||
runnable.run();
|
||||
return;
|
||||
} catch (Throwable throwable) {
|
||||
if (!isFailedTxnRetriable(throwable)) {
|
||||
throwIfUnchecked(throwable);
|
||||
throw new RuntimeException(throwable);
|
||||
}
|
||||
int sleepMillis = (1 << attempt) * initialDelayMillis;
|
||||
int jitter =
|
||||
ThreadLocalRandom.current().nextInt((int) (sleepMillis * jitterRatio))
|
||||
- (int) (sleepMillis * jitterRatio / 2);
|
||||
sleeper.sleepUninterruptibly(Duration.millis(sleepMillis + jitter));
|
||||
jpaTm().transact(() -> jpaTm().put(ofyEntity));
|
||||
counter.inc();
|
||||
} catch (RuntimeException e) {
|
||||
throw new RuntimeException(toOfyKey(ofyEntity).toString(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@
|
||||
</cron>
|
||||
|
||||
<cron>
|
||||
<url><![CDATA[/_dr/task/resaveAllEppResources]]></url>
|
||||
<url><![CDATA[/_dr/task/resaveAllEppResources?fast=true]]></url>
|
||||
<description>
|
||||
This job resaves all our resources, projected in time to "now".
|
||||
It is needed for "deleteOldCommitLogs" to work correctly.
|
||||
|
||||
@@ -337,12 +337,18 @@
|
||||
<url-pattern>/_dr/task/refreshDnsOnHostRename</url-pattern>
|
||||
</servlet-mapping>
|
||||
|
||||
<!-- Mapreduce to expand recurring billing events into OneTimes. -->
|
||||
<!-- Mapreduce to expand recurring billing events into OneTimes. -->
|
||||
<servlet-mapping>
|
||||
<servlet-name>backend-servlet</servlet-name>
|
||||
<url-pattern>/_dr/task/expandRecurringBillingEvents</url-pattern>
|
||||
</servlet-mapping>
|
||||
|
||||
<!-- Background action to delete domains past end of autorenewal. -->
|
||||
<servlet-mapping>
|
||||
<servlet-name>backend-servlet</servlet-name>
|
||||
<url-pattern>/_dr/task/deleteExpiredDomains</url-pattern>
|
||||
</servlet-mapping>
|
||||
|
||||
<!-- Mapreduce to import contacts from escrow file -->
|
||||
<servlet-mapping>
|
||||
<servlet-name>backend-servlet</servlet-name>
|
||||
|
||||
@@ -25,11 +25,6 @@
|
||||
<property name="tld" direction="asc"/>
|
||||
<property name="creationTime" direction="desc"/>
|
||||
</datastore-index>
|
||||
<!-- For finding non-autorenewing domains to be deleted. -->
|
||||
<datastore-index kind="DomainBase" ancestor="false" source="manual">
|
||||
<property name="autorenewEndTime" direction="asc"/>
|
||||
<property name="deletionTime" direction="asc"/>
|
||||
</datastore-index>
|
||||
<!-- For finding host resources by registrar. -->
|
||||
<datastore-index kind="HostResource" ancestor="false" source="manual">
|
||||
<property name="currentSponsorClientId" direction="asc"/>
|
||||
|
||||
@@ -103,7 +103,7 @@
|
||||
</cron>
|
||||
|
||||
<cron>
|
||||
<url><![CDATA[/_dr/task/resaveAllEppResources]]></url>
|
||||
<url><![CDATA[/_dr/task/resaveAllEppResources?fast=true]]></url>
|
||||
<description>
|
||||
This job resaves all our resources, projected in time to "now".
|
||||
It is needed for "deleteOldCommitLogs" to work correctly.
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
</cron>
|
||||
|
||||
<cron>
|
||||
<url><![CDATA[/_dr/task/resaveAllEppResources]]></url>
|
||||
<url><![CDATA[/_dr/task/resaveAllEppResources?fast=true]]></url>
|
||||
<description>
|
||||
This job resaves all our resources, projected in time to "now".
|
||||
It is needed for "deleteOldCommitLogs" to work correctly.
|
||||
|
||||
@@ -87,7 +87,7 @@
|
||||
</cron>
|
||||
|
||||
<cron>
|
||||
<url><![CDATA[/_dr/task/resaveAllEppResources]]></url>
|
||||
<url><![CDATA[/_dr/task/resaveAllEppResources?fast=true]]></url>
|
||||
<description>
|
||||
This job resaves all our resources, projected in time to "now".
|
||||
It is needed for "deleteOldCommitLogs" to work correctly.
|
||||
|
||||
@@ -45,13 +45,16 @@ import org.joda.time.Duration;
|
||||
/**
|
||||
* An action which polls the state of a bigquery job. If it is completed then it will log its
|
||||
* completion state; otherwise it will return a failure code so that the task will be retried.
|
||||
*
|
||||
* <p>Note that this is AUTH_INTERNAL_ONLY: we don't allow "admin" for this to mitigate a
|
||||
* vulnerability, see b/177308043.
|
||||
*/
|
||||
@Action(
|
||||
service = Action.Service.BACKEND,
|
||||
path = BigqueryPollJobAction.PATH,
|
||||
method = {Action.Method.GET, Action.Method.POST},
|
||||
automaticallyPrintOk = true,
|
||||
auth = Auth.AUTH_INTERNAL_OR_ADMIN)
|
||||
auth = Auth.AUTH_INTERNAL_ONLY)
|
||||
public class BigqueryPollJobAction implements Runnable {
|
||||
|
||||
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
|
||||
|
||||
@@ -25,6 +25,7 @@ import com.google.common.flogger.FluentLogger;
|
||||
import com.google.common.net.MediaType;
|
||||
import google.registry.model.eppoutput.EppOutput;
|
||||
import google.registry.request.Response;
|
||||
import google.registry.util.ProxyHttpHeaders;
|
||||
import javax.inject.Inject;
|
||||
|
||||
/** Handle an EPP request and response. */
|
||||
@@ -72,7 +73,7 @@ public class EppRequestHandler {
|
||||
// See: https://tools.ietf.org/html/rfc5734#section-2
|
||||
if (eppOutput.isResponse()
|
||||
&& eppOutput.getResponse().getResult().getCode() == SUCCESS_AND_CLOSE) {
|
||||
response.setHeader("Epp-Session", "close");
|
||||
response.setHeader(ProxyHttpHeaders.EPP_SESSION, "close");
|
||||
}
|
||||
// If a login request returns a success, a logged-in header is added to the response to inform
|
||||
// the proxy that it is no longer necessary to send the full client certificate to the backend
|
||||
@@ -80,7 +81,7 @@ public class EppRequestHandler {
|
||||
if (eppOutput.isResponse()
|
||||
&& eppOutput.getResponse().isLoginResponse()
|
||||
&& eppOutput.isSuccess()) {
|
||||
response.setHeader("Logged-In", "true");
|
||||
response.setHeader(ProxyHttpHeaders.LOGGED_IN, "true");
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.atWarning().withCause(e).log("handleEppCommand general exception");
|
||||
|
||||
@@ -22,5 +22,6 @@ public enum EppRequestSource {
|
||||
TLS,
|
||||
TOOL,
|
||||
CHECK_API,
|
||||
UNIT_TEST
|
||||
UNIT_TEST,
|
||||
BACKEND
|
||||
}
|
||||
|
||||
@@ -47,8 +47,8 @@ public final class ExtensionManager {
|
||||
|
||||
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
|
||||
|
||||
/** Blacklist of extension URIs that cause an error if they are used without being declared. */
|
||||
private static final ImmutableSet<String> UNDECLARED_URIS_BLACKLIST = FEE_EXTENSION_URIS;
|
||||
/** Denylist of extension URIs that cause an error if they are used without being declared. */
|
||||
private static final ImmutableSet<String> UNDECLARED_URIS_DENYLIST = FEE_EXTENSION_URIS;
|
||||
|
||||
private final ImmutableSet.Builder<Class<? extends CommandExtension>> implementedBuilder =
|
||||
new ImmutableSet.Builder<>();
|
||||
@@ -95,7 +95,7 @@ public final class ExtensionManager {
|
||||
if (undeclaredUris.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
Set<String> undeclaredUrisThatError = intersection(undeclaredUris, UNDECLARED_URIS_BLACKLIST);
|
||||
Set<String> undeclaredUrisThatError = intersection(undeclaredUris, UNDECLARED_URIS_DENYLIST);
|
||||
if (!undeclaredUrisThatError.isEmpty()) {
|
||||
throw new UndeclaredServiceExtensionException(undeclaredUrisThatError);
|
||||
}
|
||||
@@ -104,11 +104,14 @@ public final class ExtensionManager {
|
||||
clientId, flowClass.getSimpleName(), undeclaredUris);
|
||||
}
|
||||
|
||||
private static final ImmutableSet<EppRequestSource> ALLOWED_METADATA_EPP_REQUEST_SOURCES =
|
||||
ImmutableSet.of(EppRequestSource.TOOL, EppRequestSource.BACKEND);
|
||||
|
||||
private void checkForRestrictedExtensions(
|
||||
ImmutableSet<Class<? extends CommandExtension>> suppliedExtensions)
|
||||
throws OnlyToolCanPassMetadataException, UnauthorizedForSuperuserExtensionException {
|
||||
if (suppliedExtensions.contains(MetadataExtension.class)
|
||||
&& !eppRequestSource.equals(EppRequestSource.TOOL)) {
|
||||
&& !ALLOWED_METADATA_EPP_REQUEST_SOURCES.contains(eppRequestSource)) {
|
||||
throw new OnlyToolCanPassMetadataException();
|
||||
}
|
||||
// Can't use suppliedExtension.contains() here because the SuperuserExtension has child classes.
|
||||
|
||||
@@ -15,8 +15,8 @@
|
||||
package google.registry.flows;
|
||||
|
||||
import static com.google.common.collect.Sets.intersection;
|
||||
import static google.registry.model.EppResourceUtils.getLinkedDomainKeys;
|
||||
import static google.registry.model.EppResourceUtils.loadByForeignKey;
|
||||
import static google.registry.model.EppResourceUtils.queryForLinkedDomains;
|
||||
import static google.registry.model.index.ForeignKeyIndex.loadAndGetKey;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
@@ -94,14 +94,13 @@ public final class ResourceFlowUtils {
|
||||
* actual reference then we can reliably fail. If we don't find any, we can't
|
||||
* trust the query and need to do the full mapreduce.
|
||||
*/
|
||||
Iterable<Key<DomainBase>> keys =
|
||||
queryForLinkedDomains(fki.getResourceKey().getOfyKey(), now)
|
||||
.limit(FAILFAST_CHECK_COUNT)
|
||||
.keys();
|
||||
Iterable<VKey<DomainBase>> keys =
|
||||
getLinkedDomainKeys(fki.getResourceKey(), now, FAILFAST_CHECK_COUNT);
|
||||
|
||||
VKey<R> resourceVKey = fki.getResourceKey();
|
||||
Predicate<DomainBase> predicate =
|
||||
domain -> getPotentialReferences.apply(domain).contains(resourceVKey);
|
||||
return ofy().load().keys(keys).values().stream().anyMatch(predicate)
|
||||
return tm().loadByKeys(keys).values().stream().anyMatch(predicate)
|
||||
? new ResourceToDeleteIsReferencedException()
|
||||
: null;
|
||||
});
|
||||
@@ -139,7 +138,7 @@ public final class ResourceFlowUtils {
|
||||
Class<R> clazz, String targetId, DateTime now, String clientId) throws EppException {
|
||||
VKey<R> key = loadAndGetKey(clazz, targetId, now);
|
||||
if (key != null) {
|
||||
R resource = tm().load(key);
|
||||
R resource = tm().loadByKey(key);
|
||||
// These are similar exceptions, but we can track them internally as log-based metrics.
|
||||
if (Objects.equals(clientId, resource.getPersistedCurrentSponsorClientId())) {
|
||||
throw new ResourceAlreadyExistsForThisClientException(targetId);
|
||||
|
||||
@@ -15,9 +15,8 @@
|
||||
package google.registry.flows;
|
||||
|
||||
import static com.google.common.base.MoreObjects.toStringHelper;
|
||||
import static com.google.common.base.Strings.isNullOrEmpty;
|
||||
import static google.registry.request.RequestParameters.extractOptionalHeader;
|
||||
import static google.registry.request.RequestParameters.extractRequiredHeader;
|
||||
import static google.registry.util.X509Utils.loadCertificate;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
@@ -27,14 +26,24 @@ import com.google.common.net.InetAddresses;
|
||||
import dagger.Module;
|
||||
import dagger.Provides;
|
||||
import google.registry.config.RegistryConfig.Config;
|
||||
import google.registry.config.RegistryEnvironment;
|
||||
import google.registry.flows.EppException.AuthenticationErrorException;
|
||||
import google.registry.flows.certs.CertificateChecker;
|
||||
import google.registry.flows.certs.CertificateChecker.InsecureCertificateException;
|
||||
import google.registry.model.registrar.Registrar;
|
||||
import google.registry.request.Header;
|
||||
import google.registry.util.CidrAddressBlock;
|
||||
import google.registry.util.Clock;
|
||||
import google.registry.util.ProxyHttpHeaders;
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.net.InetAddress;
|
||||
import java.security.cert.CertificateException;
|
||||
import java.security.cert.X509Certificate;
|
||||
import java.util.Base64;
|
||||
import java.util.Optional;
|
||||
import javax.inject.Inject;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import org.joda.time.DateTime;
|
||||
|
||||
/**
|
||||
* Container and validation for TLS certificate and IP-allow-listing.
|
||||
@@ -44,6 +53,9 @@ import javax.servlet.http.HttpServletRequest;
|
||||
* <dl>
|
||||
* <dt>X-SSL-Certificate
|
||||
* <dd>This field should contain a base64 encoded digest of the client's TLS certificate. It is
|
||||
* used only if the validation of the full certificate fails.
|
||||
* <dt>X-SSL-Full-Certificate
|
||||
* <dd>This field should contain a base64 encoding of the client's TLS certificate. It is
|
||||
* validated during an EPP login command against a known good value that is transmitted out of
|
||||
* band.
|
||||
* <dt>X-Forwarded-For
|
||||
@@ -54,19 +66,30 @@ import javax.servlet.http.HttpServletRequest;
|
||||
public class TlsCredentials implements TransportCredentials {
|
||||
|
||||
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
|
||||
private static final DateTime CERT_ENFORCEMENT_START_TIME =
|
||||
DateTime.parse("2021-03-01T16:00:00Z");
|
||||
|
||||
private final boolean requireSslCertificates;
|
||||
private final String clientCertificateHash;
|
||||
private final InetAddress clientInetAddr;
|
||||
private final Optional<String> clientCertificateHash;
|
||||
private final Optional<String> clientCertificate;
|
||||
private final Optional<InetAddress> clientInetAddr;
|
||||
private final CertificateChecker certificateChecker;
|
||||
private final Clock clock;
|
||||
|
||||
@Inject
|
||||
public TlsCredentials(
|
||||
@Config("requireSslCertificates") boolean requireSslCertificates,
|
||||
@Header("X-SSL-Certificate") String clientCertificateHash,
|
||||
@Header("X-Forwarded-For") Optional<String> clientAddress) {
|
||||
@Header(ProxyHttpHeaders.CERTIFICATE_HASH) Optional<String> clientCertificateHash,
|
||||
@Header(ProxyHttpHeaders.FULL_CERTIFICATE) Optional<String> clientCertificate,
|
||||
@Header(ProxyHttpHeaders.IP_ADDRESS) Optional<String> clientAddress,
|
||||
CertificateChecker certificateChecker,
|
||||
Clock clock) {
|
||||
this.requireSslCertificates = requireSslCertificates;
|
||||
this.clientCertificateHash = clientCertificateHash;
|
||||
this.clientInetAddr = clientAddress.isPresent() ? parseInetAddress(clientAddress.get()) : null;
|
||||
this.clientCertificate = clientCertificate;
|
||||
this.clientInetAddr = clientAddress.map(TlsCredentials::parseInetAddress);
|
||||
this.certificateChecker = certificateChecker;
|
||||
this.clock = clock;
|
||||
}
|
||||
|
||||
static InetAddress parseInetAddress(String asciiAddr) {
|
||||
@@ -93,14 +116,18 @@ public class TlsCredentials implements TransportCredentials {
|
||||
ImmutableList<CidrAddressBlock> ipAddressAllowList = registrar.getIpAddressAllowList();
|
||||
if (ipAddressAllowList.isEmpty()) {
|
||||
logger.atInfo().log(
|
||||
"Skipping IP allow list check because %s doesn't have an IP allow list",
|
||||
"Skipping IP allow list check because %s doesn't have an IP allow list.",
|
||||
registrar.getClientId());
|
||||
return;
|
||||
}
|
||||
for (CidrAddressBlock cidrAddressBlock : ipAddressAllowList) {
|
||||
if (cidrAddressBlock.contains(clientInetAddr)) {
|
||||
// IP address is in allow list; return early.
|
||||
return;
|
||||
// In the rare unexpected case that the client inet address wasn't passed along at all, then
|
||||
// by default deny access.
|
||||
if (clientInetAddr.isPresent()) {
|
||||
for (CidrAddressBlock cidrAddressBlock : ipAddressAllowList) {
|
||||
if (cidrAddressBlock.contains(clientInetAddr.get())) {
|
||||
// IP address is in allow list; return early.
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
logger.atInfo().log(
|
||||
@@ -113,13 +140,88 @@ public class TlsCredentials implements TransportCredentials {
|
||||
/**
|
||||
* Verifies client SSL certificate is permitted to issue commands as {@code registrar}.
|
||||
*
|
||||
* @throws MissingRegistrarCertificateException if frontend didn't send certificate hash header
|
||||
* @throws MissingRegistrarCertificateException if frontend didn't send certificate header
|
||||
* @throws BadRegistrarCertificateException if registrar requires certificate and it didn't match
|
||||
*/
|
||||
@VisibleForTesting
|
||||
void validateCertificate(Registrar registrar) throws AuthenticationErrorException {
|
||||
if (isNullOrEmpty(registrar.getClientCertificateHash())
|
||||
&& isNullOrEmpty(registrar.getFailoverClientCertificateHash())) {
|
||||
// Check that certificate is present in registrar object
|
||||
if (!registrar.getClientCertificate().isPresent()
|
||||
&& !registrar.getFailoverClientCertificate().isPresent()) {
|
||||
// Log an error and validate using certificate hash instead
|
||||
// TODO(sarahbot): throw a RegistrarCertificateNotConfiguredException once hash is no longer
|
||||
// used as failover
|
||||
logger.atWarning().log(
|
||||
"There is no certificate configured for registrar %s.", registrar.getClientId());
|
||||
} else if (!clientCertificate.isPresent()) {
|
||||
// Check that the request included the full certificate
|
||||
// Log an error and validate using certificate hash instead
|
||||
// TODO(sarahbot): throw a MissingRegistrarCertificateException once hash is no longer used as
|
||||
// failover
|
||||
logger.atWarning().log(
|
||||
"Request from registrar %s did not include X-SSL-Full-Certificate.",
|
||||
registrar.getClientId());
|
||||
} else {
|
||||
X509Certificate passedCert;
|
||||
Optional<X509Certificate> storedCert;
|
||||
Optional<X509Certificate> storedFailoverCert;
|
||||
|
||||
try {
|
||||
storedCert = deserializePemCert(registrar.getClientCertificate());
|
||||
storedFailoverCert = deserializePemCert(registrar.getFailoverClientCertificate());
|
||||
passedCert = decodeCertString(clientCertificate.get());
|
||||
} catch (Exception e) {
|
||||
// TODO(Sarahbot@): remove this catch once we know it's working
|
||||
logger.atWarning().log(
|
||||
"Error converting certificate string to certificate for %s: %s",
|
||||
registrar.getClientId(), e);
|
||||
validateCertificateHash(registrar);
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if the certificate is equal to the one on file for the registrar.
|
||||
if (passedCert.equals(storedCert.orElse(null))
|
||||
|| passedCert.equals(storedFailoverCert.orElse(null))) {
|
||||
// Check certificate for any requirement violations
|
||||
// TODO(Sarahbot@): Throw exceptions instead of just logging once requirement enforcement
|
||||
// begins
|
||||
try {
|
||||
certificateChecker.validateCertificate(passedCert);
|
||||
} catch (InsecureCertificateException e) {
|
||||
// TODO(Sarahbot@): Remove this if statement after March 1. After March 1, exception
|
||||
// should be thrown in all environments.
|
||||
// throw exception in unit tests and Sandbox
|
||||
if (RegistryEnvironment.get().equals(RegistryEnvironment.UNITTEST)
|
||||
|| RegistryEnvironment.get().equals(RegistryEnvironment.SANDBOX)
|
||||
|| clock.nowUtc().isAfter(CERT_ENFORCEMENT_START_TIME)) {
|
||||
throw new CertificateContainsSecurityViolationsException(e);
|
||||
}
|
||||
logger.atWarning().log(
|
||||
"Registrar certificate used for %s does not meet certificate requirements: %s",
|
||||
registrar.getClientId(), e.getMessage());
|
||||
} catch (Exception e) {
|
||||
logger.atWarning().log(
|
||||
"Error validating certificate for %s: %s", registrar.getClientId(), e);
|
||||
}
|
||||
// successfully validated, return here since hash validation is not necessary
|
||||
return;
|
||||
}
|
||||
// Log an error and validate using certificate hash instead
|
||||
// TODO(sarahbot): throw a BadRegistrarCertificateException once hash is no longer used as
|
||||
// failover
|
||||
logger.atWarning().log("Non-matching certificate for registrar %s.", registrar.getClientId());
|
||||
}
|
||||
validateCertificateHash(registrar);
|
||||
}
|
||||
|
||||
private void validateCertificateHash(Registrar registrar) throws AuthenticationErrorException {
|
||||
logger.atWarning().log(
|
||||
"Error validating certificate for %s, attempting to validate using certificate hash.",
|
||||
registrar.getClientId());
|
||||
// Check the certificate hash as a failover
|
||||
// TODO(sarahbot): Remove hash checks once certificate checks are working.
|
||||
if (!registrar.getClientCertificateHash().isPresent()
|
||||
&& !registrar.getFailoverClientCertificateHash().isPresent()) {
|
||||
if (requireSslCertificates) {
|
||||
throw new RegistrarCertificateNotConfiguredException();
|
||||
} else {
|
||||
@@ -128,14 +230,17 @@ public class TlsCredentials implements TransportCredentials {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (isNullOrEmpty(clientCertificateHash)) {
|
||||
logger.atInfo().log("Request did not include X-SSL-Certificate");
|
||||
// Check that the request included the certificate hash
|
||||
if (!clientCertificateHash.isPresent()) {
|
||||
logger.atInfo().log(
|
||||
"Request from registrar %s did not include X-SSL-Certificate.", registrar.getClientId());
|
||||
throw new MissingRegistrarCertificateException();
|
||||
}
|
||||
// Check if the certificate hash is equal to the one on file for the registrar.
|
||||
if (!clientCertificateHash.equals(registrar.getClientCertificateHash())
|
||||
&& !clientCertificateHash.equals(registrar.getFailoverClientCertificateHash())) {
|
||||
logger.atWarning().log(
|
||||
"bad certificate hash (%s) for %s, wanted either %s or %s",
|
||||
"Non-matching certificate hash (%s) for %s, wanted either %s or %s.",
|
||||
clientCertificateHash,
|
||||
registrar.getClientId(),
|
||||
registrar.getClientCertificateHash(),
|
||||
@@ -151,24 +256,55 @@ public class TlsCredentials implements TransportCredentials {
|
||||
}
|
||||
}
|
||||
|
||||
// Converts a PEM formatted certificate string into an X509Certificate
|
||||
private Optional<X509Certificate> deserializePemCert(Optional<String> certificateString)
|
||||
throws CertificateException {
|
||||
if (certificateString.isPresent()) {
|
||||
return Optional.of(loadCertificate(certificateString.get()));
|
||||
}
|
||||
return Optional.empty();
|
||||
}
|
||||
|
||||
// Decodes the string representation of an encoded certificate back into an X509Certificate
|
||||
private X509Certificate decodeCertString(String encodedCertString) throws CertificateException {
|
||||
byte decodedCert[] = Base64.getDecoder().decode(encodedCertString);
|
||||
ByteArrayInputStream inputStream = new ByteArrayInputStream(decodedCert);
|
||||
return loadCertificate(inputStream);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return toStringHelper(getClass())
|
||||
.add("clientCertificateHash", clientCertificateHash)
|
||||
.add("clientAddress", clientInetAddr)
|
||||
.add("clientCertificate", clientCertificate.orElse(null))
|
||||
.add("clientCertificateHash", clientCertificateHash.orElse(null))
|
||||
.add("clientAddress", clientInetAddr.orElse(null))
|
||||
.toString();
|
||||
}
|
||||
|
||||
/** Registrar certificate does not match stored certificate. */
|
||||
public static class BadRegistrarCertificateException extends AuthenticationErrorException {
|
||||
public BadRegistrarCertificateException() {
|
||||
BadRegistrarCertificateException() {
|
||||
super("Registrar certificate does not match stored certificate");
|
||||
}
|
||||
}
|
||||
|
||||
/** Registrar certificate contains the following security violations: ... */
|
||||
public static class CertificateContainsSecurityViolationsException
|
||||
extends AuthenticationErrorException {
|
||||
InsecureCertificateException exception;
|
||||
|
||||
CertificateContainsSecurityViolationsException(InsecureCertificateException exception) {
|
||||
super(
|
||||
String.format(
|
||||
"Registrar certificate contains the following security violations:\n%s",
|
||||
exception.getMessage()));
|
||||
this.exception = exception;
|
||||
}
|
||||
}
|
||||
|
||||
/** Registrar certificate not present. */
|
||||
public static class MissingRegistrarCertificateException extends AuthenticationErrorException {
|
||||
public MissingRegistrarCertificateException() {
|
||||
MissingRegistrarCertificateException() {
|
||||
super("Registrar certificate not present");
|
||||
}
|
||||
}
|
||||
@@ -176,14 +312,14 @@ public class TlsCredentials implements TransportCredentials {
|
||||
/** Registrar certificate is not configured. */
|
||||
public static class RegistrarCertificateNotConfiguredException
|
||||
extends AuthenticationErrorException {
|
||||
public RegistrarCertificateNotConfiguredException() {
|
||||
RegistrarCertificateNotConfiguredException() {
|
||||
super("Registrar certificate is not configured");
|
||||
}
|
||||
}
|
||||
|
||||
/** Registrar IP address is not in stored allow list. */
|
||||
public static class BadRegistrarIpAddressException extends AuthenticationErrorException {
|
||||
public BadRegistrarIpAddressException() {
|
||||
BadRegistrarIpAddressException() {
|
||||
super("Registrar IP address is not in stored allow list");
|
||||
}
|
||||
}
|
||||
@@ -191,16 +327,27 @@ public class TlsCredentials implements TransportCredentials {
|
||||
/** Dagger module for the EPP TLS endpoint. */
|
||||
@Module
|
||||
public static final class EppTlsModule {
|
||||
|
||||
@Provides
|
||||
@Header("X-SSL-Certificate")
|
||||
static String provideClientCertificateHash(HttpServletRequest req) {
|
||||
return extractRequiredHeader(req, "X-SSL-Certificate");
|
||||
@Header(ProxyHttpHeaders.CERTIFICATE_HASH)
|
||||
static Optional<String> provideClientCertificateHash(HttpServletRequest req) {
|
||||
// Note: This header is actually required, we just want to handle its absence explicitly
|
||||
// by throwing an EPP exception rather than a generic Bad Request exception.
|
||||
return extractOptionalHeader(req, ProxyHttpHeaders.CERTIFICATE_HASH);
|
||||
}
|
||||
|
||||
@Provides
|
||||
@Header("X-Forwarded-For")
|
||||
static Optional<String> provideForwardedFor(HttpServletRequest req) {
|
||||
return extractOptionalHeader(req, "X-Forwarded-For");
|
||||
@Header(ProxyHttpHeaders.FULL_CERTIFICATE)
|
||||
static Optional<String> provideClientCertificate(HttpServletRequest req) {
|
||||
// Note: This header is actually required, we just want to handle its absence explicitly
|
||||
// by throwing an EPP exception rather than a generic Bad Request exception.
|
||||
return extractOptionalHeader(req, ProxyHttpHeaders.FULL_CERTIFICATE);
|
||||
}
|
||||
|
||||
@Provides
|
||||
@Header(ProxyHttpHeaders.IP_ADDRESS)
|
||||
static Optional<String> provideIpAddress(HttpServletRequest req) {
|
||||
return extractOptionalHeader(req, ProxyHttpHeaders.IP_ADDRESS);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -89,14 +89,26 @@ public class CertificateChecker {
|
||||
* Checks the given certificate string for violations and throws an exception if any violations
|
||||
* exist.
|
||||
*/
|
||||
public void validateCertificate(String certificateString) {
|
||||
ImmutableSet<CertificateViolation> violations = checkCertificate(certificateString);
|
||||
public void validateCertificate(String certificateString) throws InsecureCertificateException {
|
||||
handleCertViolations(checkCertificate(certificateString));
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks the given certificate string for violations and throws an exception if any violations
|
||||
* exist.
|
||||
*/
|
||||
public void validateCertificate(X509Certificate certificate) throws InsecureCertificateException {
|
||||
handleCertViolations(checkCertificate(certificate));
|
||||
}
|
||||
|
||||
private void handleCertViolations(ImmutableSet<CertificateViolation> violations)
|
||||
throws InsecureCertificateException {
|
||||
if (!violations.isEmpty()) {
|
||||
String displayMessages =
|
||||
violations.stream()
|
||||
.map(violation -> getViolationDisplayMessage(violation))
|
||||
.collect(Collectors.joining("\n"));
|
||||
throw new IllegalArgumentException(displayMessages);
|
||||
throw new InsecureCertificateException(violations, displayMessages);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -258,4 +270,14 @@ public class CertificateChecker {
|
||||
return certificateChecker.getViolationDisplayMessage(this);
|
||||
}
|
||||
}
|
||||
|
||||
/** Exception to throw when a certificate has security violations. */
|
||||
public static class InsecureCertificateException extends Exception {
|
||||
ImmutableSet<CertificateViolation> violations;
|
||||
|
||||
InsecureCertificateException(ImmutableSet<CertificateViolation> violations, String message) {
|
||||
super(message);
|
||||
this.violations = violations;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,9 +19,9 @@ import static google.registry.flows.ResourceFlowUtils.verifyResourceDoesNotExist
|
||||
import static google.registry.flows.contact.ContactFlowUtils.validateAsciiPostalInfo;
|
||||
import static google.registry.flows.contact.ContactFlowUtils.validateContactAgainstPolicy;
|
||||
import static google.registry.model.EppResourceUtils.createRepoId;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.googlecode.objectify.Key;
|
||||
import google.registry.config.RegistryConfig.Config;
|
||||
import google.registry.flows.EppException;
|
||||
@@ -95,11 +95,12 @@ public final class ContactCreateFlow implements TransactionalFlow {
|
||||
.setModificationTime(now)
|
||||
.setXmlBytes(null) // We don't want to store contact details in the history entry.
|
||||
.setParent(Key.create(newContact));
|
||||
ofy().save().entities(
|
||||
newContact,
|
||||
historyBuilder.build(),
|
||||
ForeignKeyIndex.create(newContact, newContact.getDeletionTime()),
|
||||
EppResourceIndex.create(Key.create(newContact)));
|
||||
tm().insertAll(
|
||||
ImmutableSet.of(
|
||||
newContact,
|
||||
historyBuilder.build().toChildHistoryEntity(),
|
||||
ForeignKeyIndex.create(newContact, newContact.getDeletionTime()),
|
||||
EppResourceIndex.create(Key.create(newContact))));
|
||||
return responseBuilder
|
||||
.setResData(ContactCreateData.create(newContact.getContactId(), now))
|
||||
.build();
|
||||
|
||||
@@ -21,7 +21,6 @@ import static google.registry.flows.ResourceFlowUtils.verifyNoDisallowedStatuses
|
||||
import static google.registry.flows.ResourceFlowUtils.verifyOptionalAuthInfo;
|
||||
import static google.registry.flows.ResourceFlowUtils.verifyResourceOwnership;
|
||||
import static google.registry.model.eppoutput.Result.Code.SUCCESS_WITH_ACTION_PENDING;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
@@ -101,7 +100,8 @@ public final class ContactDeleteFlow implements TransactionalFlow {
|
||||
.setType(HistoryEntry.Type.CONTACT_PENDING_DELETE)
|
||||
.setModificationTime(now)
|
||||
.setParent(Key.create(existingContact));
|
||||
ofy().save().<Object>entities(newContact, historyBuilder.build());
|
||||
tm().insert(historyBuilder.build().toChildHistoryEntity());
|
||||
tm().update(newContact);
|
||||
return responseBuilder.setResultFromCode(SUCCESS_WITH_ACTION_PENDING).build();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,7 +20,6 @@ import static google.registry.flows.ResourceFlowUtils.verifyResourceOwnership;
|
||||
import static google.registry.model.EppResourceUtils.isLinked;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.googlecode.objectify.Key;
|
||||
import google.registry.flows.EppException;
|
||||
import google.registry.flows.ExtensionManager;
|
||||
import google.registry.flows.Flow;
|
||||
@@ -77,7 +76,7 @@ public final class ContactInfoFlow implements Flow {
|
||||
clientId.equals(contact.getCurrentSponsorClientId()) || authInfo.isPresent();
|
||||
ImmutableSet.Builder<StatusValue> statusValues = new ImmutableSet.Builder<>();
|
||||
statusValues.addAll(contact.getStatusValues());
|
||||
if (isLinked(Key.create(contact), now)) {
|
||||
if (isLinked(contact.createVKey(), now)) {
|
||||
statusValues.add(StatusValue.LINKED);
|
||||
}
|
||||
return responseBuilder
|
||||
|
||||
@@ -22,9 +22,9 @@ import static google.registry.flows.ResourceFlowUtils.verifyResourceOwnership;
|
||||
import static google.registry.flows.contact.ContactFlowUtils.createGainingTransferPollMessage;
|
||||
import static google.registry.flows.contact.ContactFlowUtils.createTransferResponse;
|
||||
import static google.registry.model.ResourceTransferUtils.approvePendingTransfer;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.googlecode.objectify.Key;
|
||||
import google.registry.flows.EppException;
|
||||
import google.registry.flows.ExtensionManager;
|
||||
@@ -94,7 +94,8 @@ public final class ContactTransferApproveFlow implements TransactionalFlow {
|
||||
// Create a poll message for the gaining client.
|
||||
PollMessage gainingPollMessage =
|
||||
createGainingTransferPollMessage(targetId, newContact.getTransferData(), historyEntry);
|
||||
ofy().save().<Object>entities(newContact, historyEntry, gainingPollMessage);
|
||||
tm().insertAll(ImmutableSet.of(historyEntry.toChildHistoryEntity(), gainingPollMessage));
|
||||
tm().update(newContact);
|
||||
// Delete the billing event and poll messages that were written in case the transfer would have
|
||||
// been implicitly server approved.
|
||||
tm().delete(existingContact.getTransferData().getServerApproveEntities());
|
||||
|
||||
@@ -22,9 +22,9 @@ import static google.registry.flows.ResourceFlowUtils.verifyTransferInitiator;
|
||||
import static google.registry.flows.contact.ContactFlowUtils.createLosingTransferPollMessage;
|
||||
import static google.registry.flows.contact.ContactFlowUtils.createTransferResponse;
|
||||
import static google.registry.model.ResourceTransferUtils.denyPendingTransfer;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.googlecode.objectify.Key;
|
||||
import google.registry.flows.EppException;
|
||||
import google.registry.flows.ExtensionManager;
|
||||
@@ -90,7 +90,8 @@ public final class ContactTransferCancelFlow implements TransactionalFlow {
|
||||
// Create a poll message for the losing client.
|
||||
PollMessage losingPollMessage =
|
||||
createLosingTransferPollMessage(targetId, newContact.getTransferData(), historyEntry);
|
||||
ofy().save().<Object>entities(newContact, historyEntry, losingPollMessage);
|
||||
tm().insertAll(ImmutableSet.of(historyEntry.toChildHistoryEntity(), losingPollMessage));
|
||||
tm().update(newContact);
|
||||
// Delete the billing event and poll messages that were written in case the transfer would have
|
||||
// been implicitly server approved.
|
||||
tm().delete(existingContact.getTransferData().getServerApproveEntities());
|
||||
|
||||
@@ -22,9 +22,9 @@ import static google.registry.flows.ResourceFlowUtils.verifyResourceOwnership;
|
||||
import static google.registry.flows.contact.ContactFlowUtils.createGainingTransferPollMessage;
|
||||
import static google.registry.flows.contact.ContactFlowUtils.createTransferResponse;
|
||||
import static google.registry.model.ResourceTransferUtils.denyPendingTransfer;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.googlecode.objectify.Key;
|
||||
import google.registry.flows.EppException;
|
||||
import google.registry.flows.ExtensionManager;
|
||||
@@ -87,7 +87,8 @@ public final class ContactTransferRejectFlow implements TransactionalFlow {
|
||||
.build();
|
||||
PollMessage gainingPollMessage =
|
||||
createGainingTransferPollMessage(targetId, newContact.getTransferData(), historyEntry);
|
||||
ofy().save().<Object>entities(newContact, historyEntry, gainingPollMessage);
|
||||
tm().insertAll(ImmutableSet.of(historyEntry.toChildHistoryEntity(), gainingPollMessage));
|
||||
tm().update(newContact);
|
||||
// Delete the billing event and poll messages that were written in case the transfer would have
|
||||
// been implicitly server approved.
|
||||
tm().delete(existingContact.getTransferData().getServerApproveEntities());
|
||||
|
||||
@@ -23,7 +23,6 @@ import static google.registry.flows.contact.ContactFlowUtils.createGainingTransf
|
||||
import static google.registry.flows.contact.ContactFlowUtils.createLosingTransferPollMessage;
|
||||
import static google.registry.flows.contact.ContactFlowUtils.createTransferResponse;
|
||||
import static google.registry.model.eppoutput.Result.Code.SUCCESS_WITH_ACTION_PENDING;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
@@ -145,12 +144,13 @@ public final class ContactTransferRequestFlow implements TransactionalFlow {
|
||||
.setTransferData(pendingTransferData)
|
||||
.addStatusValue(StatusValue.PENDING_TRANSFER)
|
||||
.build();
|
||||
ofy().save().<Object>entities(
|
||||
newContact,
|
||||
historyEntry,
|
||||
requestPollMessage,
|
||||
serverApproveGainingPollMessage,
|
||||
serverApproveLosingPollMessage);
|
||||
tm().update(newContact);
|
||||
tm().insertAll(
|
||||
ImmutableSet.of(
|
||||
historyEntry.toChildHistoryEntity(),
|
||||
requestPollMessage,
|
||||
serverApproveGainingPollMessage,
|
||||
serverApproveLosingPollMessage));
|
||||
return responseBuilder
|
||||
.setResultFromCode(SUCCESS_WITH_ACTION_PENDING)
|
||||
.setResData(createTransferResponse(targetId, newContact.getTransferData()))
|
||||
|
||||
@@ -24,7 +24,6 @@ import static google.registry.flows.ResourceFlowUtils.verifyOptionalAuthInfo;
|
||||
import static google.registry.flows.ResourceFlowUtils.verifyResourceOwnership;
|
||||
import static google.registry.flows.contact.ContactFlowUtils.validateAsciiPostalInfo;
|
||||
import static google.registry.flows.contact.ContactFlowUtils.validateContactAgainstPolicy;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
@@ -151,7 +150,8 @@ public final class ContactUpdateFlow implements TransactionalFlow {
|
||||
}
|
||||
validateAsciiPostalInfo(newContact.getInternationalizedPostalInfo());
|
||||
validateContactAgainstPolicy(newContact);
|
||||
ofy().save().<Object>entities(newContact, historyBuilder.build());
|
||||
tm().insert(historyBuilder.build().toChildHistoryEntity());
|
||||
tm().update(newContact);
|
||||
return responseBuilder.build();
|
||||
}
|
||||
|
||||
|
||||
@@ -110,7 +110,6 @@ import google.registry.model.reporting.DomainTransactionRecord;
|
||||
import google.registry.model.reporting.DomainTransactionRecord.TransactionReportField;
|
||||
import google.registry.model.reporting.HistoryEntry;
|
||||
import google.registry.model.reporting.IcannReportingTypes.ActivityReportField;
|
||||
import google.registry.persistence.DomainHistoryVKey;
|
||||
import google.registry.persistence.VKey;
|
||||
import google.registry.tmch.LordnTaskUtils;
|
||||
import java.util.Optional;
|
||||
@@ -372,7 +371,7 @@ public class DomainCreateFlow implements TransactionalFlow {
|
||||
&& TokenType.SINGLE_USE.equals(allocationToken.get().getTokenType())) {
|
||||
entitiesToSave.add(
|
||||
allocationTokenFlowUtils.redeemToken(
|
||||
allocationToken.get(), DomainHistoryVKey.create(Key.create(historyEntry))));
|
||||
allocationToken.get(), HistoryEntry.createVKey(Key.create(historyEntry))));
|
||||
}
|
||||
enqueueTasks(newDomain, hasSignedMarks, hasClaimsNotice);
|
||||
|
||||
|
||||
@@ -225,7 +225,7 @@ public final class DomainDeleteFlow implements TransactionalFlow {
|
||||
if (gracePeriod.getOneTimeBillingEvent() != null) {
|
||||
// Take the amount of amount of registration time being refunded off the expiration time.
|
||||
// This can be either add grace periods or renew grace periods.
|
||||
BillingEvent.OneTime oneTime = tm().load(gracePeriod.getOneTimeBillingEvent());
|
||||
BillingEvent.OneTime oneTime = tm().loadByKey(gracePeriod.getOneTimeBillingEvent());
|
||||
newExpirationTime = newExpirationTime.minusYears(oneTime.getPeriodYears());
|
||||
} else if (gracePeriod.getRecurringBillingEvent() != null) {
|
||||
// Take 1 year off the registration if in the autorenew grace period (no need to load the
|
||||
@@ -372,12 +372,12 @@ public final class DomainDeleteFlow implements TransactionalFlow {
|
||||
private Money getGracePeriodCost(GracePeriod gracePeriod, DateTime now) {
|
||||
if (gracePeriod.getType() == GracePeriodStatus.AUTO_RENEW) {
|
||||
DateTime autoRenewTime =
|
||||
tm().load(checkNotNull(gracePeriod.getRecurringBillingEvent()))
|
||||
tm().loadByKey(checkNotNull(gracePeriod.getRecurringBillingEvent()))
|
||||
.getRecurrenceTimeOfYear()
|
||||
.getLastInstanceBeforeOrAt(now);
|
||||
return getDomainRenewCost(targetId, autoRenewTime, 1);
|
||||
}
|
||||
return tm().load(checkNotNull(gracePeriod.getOneTimeBillingEvent())).getCost();
|
||||
return tm().loadByKey(checkNotNull(gracePeriod.getOneTimeBillingEvent())).getCost();
|
||||
}
|
||||
|
||||
@Nullable
|
||||
|
||||
@@ -517,7 +517,7 @@ public class DomainFlowUtils {
|
||||
*/
|
||||
public static void updateAutorenewRecurrenceEndTime(DomainBase domain, DateTime newEndTime) {
|
||||
Optional<PollMessage.Autorenew> autorenewPollMessage =
|
||||
tm().maybeLoad(domain.getAutorenewPollMessage());
|
||||
tm().loadByKeyIfPresent(domain.getAutorenewPollMessage());
|
||||
|
||||
// Construct an updated autorenew poll message. If the autorenew poll message no longer exists,
|
||||
// create a new one at the same id. This can happen if a transfer was requested on a domain
|
||||
@@ -542,7 +542,7 @@ public class DomainFlowUtils {
|
||||
ofy().save().entity(updatedAutorenewPollMessage);
|
||||
}
|
||||
|
||||
Recurring recurring = tm().load(domain.getAutorenewBillingEvent());
|
||||
Recurring recurring = tm().loadByKey(domain.getAutorenewBillingEvent());
|
||||
ofy().save().entity(recurring.asBuilder().setRecurrenceEndTime(newEndTime).build());
|
||||
}
|
||||
|
||||
@@ -1022,7 +1022,7 @@ public class DomainFlowUtils {
|
||||
for (DesignatedContact contact : contacts) {
|
||||
builder.add(
|
||||
ForeignKeyedDesignatedContact.create(
|
||||
contact.getType(), tm().load(contact.getContactKey()).getContactId()));
|
||||
contact.getType(), tm().loadByKey(contact.getContactKey()).getContactId()));
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
@@ -101,8 +101,8 @@ public final class DomainInfoFlow implements Flow {
|
||||
flowCustomLogic.afterValidation(
|
||||
AfterValidationParameters.newBuilder().setDomain(domain).build());
|
||||
// Prefetch all referenced resources. Calling values() blocks until loading is done.
|
||||
tm().load(domain.getNameservers());
|
||||
tm().load(domain.getReferencedContacts());
|
||||
tm().loadByKeys(domain.getNameservers());
|
||||
tm().loadByKeys(domain.getReferencedContacts());
|
||||
// Registrars can only see a few fields on unauthorized domains.
|
||||
// This is a policy decision that is left up to us by the rfcs.
|
||||
DomainInfoData.Builder infoBuilder =
|
||||
@@ -110,7 +110,7 @@ public final class DomainInfoFlow implements Flow {
|
||||
.setFullyQualifiedDomainName(domain.getDomainName())
|
||||
.setRepoId(domain.getRepoId())
|
||||
.setCurrentSponsorClientId(domain.getCurrentSponsorClientId())
|
||||
.setRegistrant(tm().load(domain.getRegistrant()).getContactId());
|
||||
.setRegistrant(tm().loadByKey(domain.getRegistrant()).getContactId());
|
||||
// If authInfo is non-null, then the caller is authorized to see the full information since we
|
||||
// will have already verified the authInfo is valid.
|
||||
if (clientId.equals(domain.getCurrentSponsorClientId()) || authInfo.isPresent()) {
|
||||
|
||||
@@ -15,14 +15,13 @@
|
||||
package google.registry.flows.domain.token;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkArgument;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
import com.google.common.base.Strings;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.Maps;
|
||||
import com.google.common.net.InternetDomainName;
|
||||
import com.googlecode.objectify.Key;
|
||||
import google.registry.flows.EppException;
|
||||
import google.registry.flows.EppException.AssociationProhibitsOperationException;
|
||||
import google.registry.flows.EppException.AuthorizationErrorException;
|
||||
@@ -32,7 +31,8 @@ import google.registry.model.domain.token.AllocationToken;
|
||||
import google.registry.model.domain.token.AllocationToken.TokenStatus;
|
||||
import google.registry.model.domain.token.AllocationToken.TokenType;
|
||||
import google.registry.model.registry.Registry;
|
||||
import google.registry.persistence.DomainHistoryVKey;
|
||||
import google.registry.model.reporting.HistoryEntry;
|
||||
import google.registry.persistence.VKey;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import javax.inject.Inject;
|
||||
@@ -107,7 +107,7 @@ public class AllocationTokenFlowUtils {
|
||||
|
||||
/** Redeems a SINGLE_USE {@link AllocationToken}, returning the redeemed copy. */
|
||||
public AllocationToken redeemToken(
|
||||
AllocationToken token, DomainHistoryVKey redemptionHistoryEntry) {
|
||||
AllocationToken token, VKey<? extends HistoryEntry> redemptionHistoryEntry) {
|
||||
checkArgument(
|
||||
TokenType.SINGLE_USE.equals(token.getTokenType()),
|
||||
"Only SINGLE_USE tokens can be marked as redeemed");
|
||||
@@ -152,14 +152,15 @@ public class AllocationTokenFlowUtils {
|
||||
// See https://tools.ietf.org/html/draft-ietf-regext-allocation-token-04#section-2.1
|
||||
throw new InvalidAllocationTokenException();
|
||||
}
|
||||
AllocationToken tokenEntity = ofy().load().key(Key.create(AllocationToken.class, token)).now();
|
||||
if (tokenEntity == null) {
|
||||
Optional<AllocationToken> maybeTokenEntity =
|
||||
tm().loadByKeyIfPresent(VKey.create(AllocationToken.class, token));
|
||||
if (!maybeTokenEntity.isPresent()) {
|
||||
throw new InvalidAllocationTokenException();
|
||||
}
|
||||
if (tokenEntity.isRedeemed()) {
|
||||
if (maybeTokenEntity.get().isRedeemed()) {
|
||||
throw new AlreadyRedeemedAllocationTokenException();
|
||||
}
|
||||
return tokenEntity;
|
||||
return maybeTokenEntity.get();
|
||||
}
|
||||
|
||||
// Note: exception messages should be <= 32 characters long for domain check results
|
||||
|
||||
@@ -21,10 +21,8 @@ import static google.registry.flows.host.HostFlowUtils.validateHostName;
|
||||
import static google.registry.flows.host.HostFlowUtils.verifySuperordinateDomainNotInPendingDelete;
|
||||
import static google.registry.flows.host.HostFlowUtils.verifySuperordinateDomainOwnership;
|
||||
import static google.registry.model.EppResourceUtils.createRepoId;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
import static google.registry.util.CollectionUtils.isNullOrEmpty;
|
||||
import static google.registry.util.CollectionUtils.union;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.googlecode.objectify.Key;
|
||||
@@ -137,13 +135,11 @@ public final class HostCreateFlow implements TransactionalFlow {
|
||||
ImmutableSet<ImmutableObject> entitiesToSave =
|
||||
ImmutableSet.of(
|
||||
newHost,
|
||||
historyBuilder.build(),
|
||||
historyBuilder.build().toChildHistoryEntity(),
|
||||
ForeignKeyIndex.create(newHost, newHost.getDeletionTime()),
|
||||
EppResourceIndex.create(Key.create(newHost)));
|
||||
if (superordinateDomain.isPresent()) {
|
||||
entitiesToSave =
|
||||
union(
|
||||
entitiesToSave,
|
||||
tm().update(
|
||||
superordinateDomain
|
||||
.get()
|
||||
.asBuilder()
|
||||
@@ -153,7 +149,7 @@ public final class HostCreateFlow implements TransactionalFlow {
|
||||
// they are only written as NS records from the referencing domain.
|
||||
dnsQueue.addHostRefreshTask(targetId);
|
||||
}
|
||||
ofy().save().entities(entitiesToSave);
|
||||
tm().insertAll(entitiesToSave);
|
||||
return responseBuilder.setResData(HostCreateData.create(targetId, now)).build();
|
||||
}
|
||||
|
||||
|
||||
@@ -21,7 +21,6 @@ import static google.registry.flows.ResourceFlowUtils.verifyNoDisallowedStatuses
|
||||
import static google.registry.flows.ResourceFlowUtils.verifyResourceOwnership;
|
||||
import static google.registry.flows.host.HostFlowUtils.validateHostName;
|
||||
import static google.registry.model.eppoutput.Result.Code.SUCCESS_WITH_ACTION_PENDING;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
@@ -96,7 +95,7 @@ public final class HostDeleteFlow implements TransactionalFlow {
|
||||
// the client id, needs to be read off of it.
|
||||
EppResource owningResource =
|
||||
existingHost.isSubordinate()
|
||||
? tm().load(existingHost.getSuperordinateDomain()).cloneProjectedAtTime(now)
|
||||
? tm().loadByKey(existingHost.getSuperordinateDomain()).cloneProjectedAtTime(now)
|
||||
: existingHost;
|
||||
verifyResourceOwnership(clientId, owningResource);
|
||||
}
|
||||
@@ -108,7 +107,8 @@ public final class HostDeleteFlow implements TransactionalFlow {
|
||||
.setType(HistoryEntry.Type.HOST_PENDING_DELETE)
|
||||
.setModificationTime(now)
|
||||
.setParent(Key.create(existingHost));
|
||||
ofy().save().<Object>entities(newHost, historyBuilder.build());
|
||||
tm().insert(historyBuilder.build().toChildHistoryEntity());
|
||||
tm().update(newHost);
|
||||
return responseBuilder.setResultFromCode(SUCCESS_WITH_ACTION_PENDING).build();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,7 +21,6 @@ import static google.registry.model.EppResourceUtils.isLinked;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.googlecode.objectify.Key;
|
||||
import google.registry.flows.EppException;
|
||||
import google.registry.flows.ExtensionManager;
|
||||
import google.registry.flows.Flow;
|
||||
@@ -68,7 +67,7 @@ public final class HostInfoFlow implements Flow {
|
||||
HostResource host = loadAndVerifyExistence(HostResource.class, targetId, now);
|
||||
ImmutableSet.Builder<StatusValue> statusValues = new ImmutableSet.Builder<>();
|
||||
statusValues.addAll(host.getStatusValues());
|
||||
if (isLinked(Key.create(host), now)) {
|
||||
if (isLinked(host.createVKey(), now)) {
|
||||
statusValues.add(StatusValue.LINKED);
|
||||
}
|
||||
HostInfoData.Builder hostInfoDataBuilder = HostInfoData.newBuilder();
|
||||
@@ -77,7 +76,7 @@ public final class HostInfoFlow implements Flow {
|
||||
// there is no superordinate domain, the host's own values for these fields will be correct.
|
||||
if (host.isSubordinate()) {
|
||||
DomainBase superordinateDomain =
|
||||
tm().load(host.getSuperordinateDomain()).cloneProjectedAtTime(now);
|
||||
tm().loadByKey(host.getSuperordinateDomain()).cloneProjectedAtTime(now);
|
||||
hostInfoDataBuilder
|
||||
.setCurrentSponsorClientId(superordinateDomain.getCurrentSponsorClientId())
|
||||
.setLastTransferTime(host.computeLastTransferTime(superordinateDomain));
|
||||
|
||||
@@ -27,7 +27,6 @@ import static google.registry.flows.host.HostFlowUtils.validateHostName;
|
||||
import static google.registry.flows.host.HostFlowUtils.verifySuperordinateDomainNotInPendingDelete;
|
||||
import static google.registry.flows.host.HostFlowUtils.verifySuperordinateDomainOwnership;
|
||||
import static google.registry.model.index.ForeignKeyIndex.loadAndGetKey;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
import static google.registry.util.CollectionUtils.isNullOrEmpty;
|
||||
|
||||
@@ -139,7 +138,7 @@ public final class HostUpdateFlow implements TransactionalFlow {
|
||||
String newHostName = firstNonNull(suppliedNewHostName, oldHostName);
|
||||
DomainBase oldSuperordinateDomain =
|
||||
existingHost.isSubordinate()
|
||||
? tm().load(existingHost.getSuperordinateDomain()).cloneProjectedAtTime(now)
|
||||
? tm().loadByKey(existingHost.getSuperordinateDomain()).cloneProjectedAtTime(now)
|
||||
: null;
|
||||
// Note that lookupSuperordinateDomain calls cloneProjectedAtTime on the domain for us.
|
||||
Optional<DomainBase> newSuperordinateDomain =
|
||||
@@ -191,23 +190,26 @@ public final class HostUpdateFlow implements TransactionalFlow {
|
||||
.setPersistedCurrentSponsorClientId(newPersistedClientId)
|
||||
.build();
|
||||
verifyHasIpsIffIsExternal(command, existingHost, newHost);
|
||||
ImmutableSet.Builder<ImmutableObject> entitiesToSave = new ImmutableSet.Builder<>();
|
||||
entitiesToSave.add(newHost);
|
||||
ImmutableSet.Builder<ImmutableObject> entitiesToInsert = new ImmutableSet.Builder<>();
|
||||
ImmutableSet.Builder<ImmutableObject> entitiesToUpdate = new ImmutableSet.Builder<>();
|
||||
entitiesToUpdate.add(newHost);
|
||||
// Keep the {@link ForeignKeyIndex} for this host up to date.
|
||||
if (isHostRename) {
|
||||
// Update the foreign key for the old host name and save one for the new host name.
|
||||
entitiesToSave.add(
|
||||
ForeignKeyIndex.create(existingHost, now),
|
||||
ForeignKeyIndex.create(newHost, newHost.getDeletionTime()));
|
||||
entitiesToUpdate.add(ForeignKeyIndex.create(existingHost, now));
|
||||
entitiesToUpdate.add(ForeignKeyIndex.create(newHost, newHost.getDeletionTime()));
|
||||
updateSuperordinateDomains(existingHost, newHost);
|
||||
}
|
||||
enqueueTasks(existingHost, newHost);
|
||||
entitiesToSave.add(historyBuilder
|
||||
.setType(HistoryEntry.Type.HOST_UPDATE)
|
||||
.setModificationTime(now)
|
||||
.setParent(Key.create(existingHost))
|
||||
.build());
|
||||
ofy().save().entities(entitiesToSave.build());
|
||||
entitiesToInsert.add(
|
||||
historyBuilder
|
||||
.setType(HistoryEntry.Type.HOST_UPDATE)
|
||||
.setModificationTime(now)
|
||||
.setParent(Key.create(existingHost))
|
||||
.build()
|
||||
.toChildHistoryEntity());
|
||||
tm().updateAll(entitiesToUpdate.build());
|
||||
tm().insertAll(entitiesToInsert.build());
|
||||
return responseBuilder.build();
|
||||
}
|
||||
|
||||
@@ -286,7 +288,7 @@ public final class HostUpdateFlow implements TransactionalFlow {
|
||||
&& Objects.equals(
|
||||
existingHost.getSuperordinateDomain(), newHost.getSuperordinateDomain())) {
|
||||
tm().put(
|
||||
tm().load(existingHost.getSuperordinateDomain())
|
||||
tm().loadByKey(existingHost.getSuperordinateDomain())
|
||||
.asBuilder()
|
||||
.removeSubordinateHost(existingHost.getHostName())
|
||||
.addSubordinateHost(newHost.getHostName())
|
||||
@@ -295,14 +297,14 @@ public final class HostUpdateFlow implements TransactionalFlow {
|
||||
}
|
||||
if (existingHost.isSubordinate()) {
|
||||
tm().put(
|
||||
tm().load(existingHost.getSuperordinateDomain())
|
||||
tm().loadByKey(existingHost.getSuperordinateDomain())
|
||||
.asBuilder()
|
||||
.removeSubordinateHost(existingHost.getHostName())
|
||||
.build());
|
||||
}
|
||||
if (newHost.isSubordinate()) {
|
||||
tm().put(
|
||||
tm().load(newHost.getSuperordinateDomain())
|
||||
tm().loadByKey(newHost.getSuperordinateDomain())
|
||||
.asBuilder()
|
||||
.addSubordinateHost(newHost.getHostName())
|
||||
.build());
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
package google.registry.mapreduce;
|
||||
|
||||
import static google.registry.mapreduce.MapreduceRunner.PARAM_DRY_RUN;
|
||||
import static google.registry.mapreduce.MapreduceRunner.PARAM_FAST;
|
||||
import static google.registry.mapreduce.MapreduceRunner.PARAM_MAP_SHARDS;
|
||||
import static google.registry.mapreduce.MapreduceRunner.PARAM_REDUCE_SHARDS;
|
||||
import static google.registry.request.RequestParameters.extractBooleanParameter;
|
||||
@@ -36,6 +37,12 @@ public final class MapreduceModule {
|
||||
return extractBooleanParameter(req, PARAM_DRY_RUN);
|
||||
}
|
||||
|
||||
@Provides
|
||||
@Parameter(PARAM_FAST)
|
||||
static boolean provideIsFast(HttpServletRequest req) {
|
||||
return extractBooleanParameter(req, PARAM_FAST);
|
||||
}
|
||||
|
||||
@Provides
|
||||
@Parameter(PARAM_MAP_SHARDS)
|
||||
static Optional<Integer> provideMapShards(HttpServletRequest req) {
|
||||
|
||||
@@ -55,6 +55,7 @@ public class MapreduceRunner {
|
||||
public static final String PARAM_DRY_RUN = "dryRun";
|
||||
public static final String PARAM_MAP_SHARDS = "mapShards";
|
||||
public static final String PARAM_REDUCE_SHARDS = "reduceShards";
|
||||
public static final String PARAM_FAST = "fast";
|
||||
|
||||
private static final String BASE_URL = "/_dr/mapreduce/";
|
||||
private static final String QUEUE_NAME = "mapreduce";
|
||||
|
||||
@@ -0,0 +1,48 @@
|
||||
// Copyright 2020 The Nomulus Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package google.registry.model;
|
||||
|
||||
import com.google.common.flogger.FluentLogger;
|
||||
import google.registry.config.RegistryEnvironment;
|
||||
import google.registry.model.common.DatabaseTransitionSchedule;
|
||||
import google.registry.model.common.DatabaseTransitionSchedule.PrimaryDatabase;
|
||||
import google.registry.model.common.DatabaseTransitionSchedule.TransitionId;
|
||||
|
||||
/** Utility methods related to migrating dual-read/dual-write entities. */
|
||||
public class DatabaseMigrationUtils {
|
||||
|
||||
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
|
||||
|
||||
/** Throws exceptions only in unit tests, otherwise only logs exceptions. */
|
||||
public static void suppressExceptionUnlessInTest(Runnable work, String message) {
|
||||
try {
|
||||
work.run();
|
||||
} catch (Exception e) {
|
||||
if (RegistryEnvironment.get().equals(RegistryEnvironment.UNITTEST)) {
|
||||
throw e;
|
||||
}
|
||||
logger.atWarning().withCause(e).log(message);
|
||||
}
|
||||
}
|
||||
|
||||
/** Gets the value for the database currently considered primary. */
|
||||
public static PrimaryDatabase getPrimaryDatabase(TransitionId transitionId) {
|
||||
return DatabaseTransitionSchedule.getCached(transitionId)
|
||||
.map(DatabaseTransitionSchedule::getPrimaryDatabase)
|
||||
.orElse(PrimaryDatabase.DATASTORE);
|
||||
}
|
||||
|
||||
private DatabaseMigrationUtils() {}
|
||||
}
|
||||
@@ -17,6 +17,7 @@ package google.registry.model;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import google.registry.model.billing.BillingEvent;
|
||||
import google.registry.model.common.Cursor;
|
||||
import google.registry.model.common.DatabaseTransitionSchedule;
|
||||
import google.registry.model.common.EntityGroupRoot;
|
||||
import google.registry.model.common.GaeUserIdConverter;
|
||||
import google.registry.model.contact.ContactHistory;
|
||||
@@ -74,6 +75,7 @@ public final class EntityClasses {
|
||||
ContactHistory.class,
|
||||
ContactResource.class,
|
||||
Cursor.class,
|
||||
DatabaseTransitionSchedule.class,
|
||||
DomainBase.class,
|
||||
DomainHistory.class,
|
||||
EntityGroupRoot.class,
|
||||
|
||||
@@ -148,7 +148,7 @@ public abstract class EppResource extends BackupGroupRoot implements Buildable {
|
||||
*
|
||||
* @see google.registry.model.translators.CommitLogRevisionsTranslatorFactory
|
||||
*/
|
||||
@Transient
|
||||
@Transient @DoNotCompare
|
||||
ImmutableSortedMap<DateTime, Key<CommitLogManifest>> revisions = ImmutableSortedMap.of();
|
||||
|
||||
public String getRepoId() {
|
||||
@@ -360,13 +360,13 @@ public abstract class EppResource extends BackupGroupRoot implements Buildable {
|
||||
|
||||
@Override
|
||||
public EppResource load(VKey<? extends EppResource> key) {
|
||||
return tm().doTransactionless(() -> tm().load(key));
|
||||
return tm().doTransactionless(() -> tm().loadByKey(key));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<VKey<? extends EppResource>, EppResource> loadAll(
|
||||
Iterable<? extends VKey<? extends EppResource>> keys) {
|
||||
return tm().doTransactionless(() -> tm().load(keys));
|
||||
return tm().doTransactionless(() -> tm().loadByKeys(keys));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -406,7 +406,7 @@ public abstract class EppResource extends BackupGroupRoot implements Buildable {
|
||||
public static ImmutableMap<VKey<? extends EppResource>, EppResource> loadCached(
|
||||
Iterable<VKey<? extends EppResource>> keys) {
|
||||
if (!RegistryConfig.isEppResourceCachingEnabled()) {
|
||||
return tm().load(keys);
|
||||
return tm().loadByKeys(keys);
|
||||
}
|
||||
try {
|
||||
return cacheEppResources.getAll(keys);
|
||||
@@ -423,7 +423,7 @@ public abstract class EppResource extends BackupGroupRoot implements Buildable {
|
||||
*/
|
||||
public static <T extends EppResource> T loadCached(VKey<T> key) {
|
||||
if (!RegistryConfig.isEppResourceCachingEnabled()) {
|
||||
return tm().load(key);
|
||||
return tm().loadByKey(key);
|
||||
}
|
||||
try {
|
||||
// Safe to cast because loading a Key<T> returns an entity of type T.
|
||||
|
||||
@@ -17,7 +17,9 @@ package google.registry.model;
|
||||
import static com.google.common.base.Preconditions.checkArgument;
|
||||
import static com.google.common.collect.ImmutableSet.toImmutableSet;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
import static google.registry.persistence.transaction.TransactionManagerUtil.transactIfJpaTm;
|
||||
import static google.registry.util.DateTimeUtils.isAtOrAfter;
|
||||
import static google.registry.util.DateTimeUtils.isBeforeOrAt;
|
||||
import static google.registry.util.DateTimeUtils.latestOf;
|
||||
@@ -27,7 +29,6 @@ import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.flogger.FluentLogger;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Result;
|
||||
import com.googlecode.objectify.cmd.Query;
|
||||
import com.googlecode.objectify.util.ResultNow;
|
||||
import google.registry.config.RegistryConfig;
|
||||
import google.registry.model.EppResource.BuilderWithTransferData;
|
||||
@@ -36,6 +37,7 @@ import google.registry.model.EppResource.ResourceWithTransferData;
|
||||
import google.registry.model.contact.ContactResource;
|
||||
import google.registry.model.domain.DomainBase;
|
||||
import google.registry.model.eppcommon.StatusValue;
|
||||
import google.registry.model.host.HostResource;
|
||||
import google.registry.model.index.ForeignKeyIndex;
|
||||
import google.registry.model.ofy.CommitLogManifest;
|
||||
import google.registry.model.ofy.CommitLogMutation;
|
||||
@@ -43,11 +45,13 @@ import google.registry.model.registry.Registry;
|
||||
import google.registry.model.transfer.DomainTransferData;
|
||||
import google.registry.model.transfer.TransferData;
|
||||
import google.registry.model.transfer.TransferStatus;
|
||||
import google.registry.persistence.VKey;
|
||||
import java.util.List;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Optional;
|
||||
import java.util.function.Function;
|
||||
import javax.annotation.Nullable;
|
||||
import javax.persistence.Query;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.Interval;
|
||||
|
||||
@@ -56,6 +60,22 @@ public final class EppResourceUtils {
|
||||
|
||||
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
|
||||
|
||||
private static final String CONTACT_LINKED_DOMAIN_QUERY =
|
||||
"SELECT repoId FROM Domain "
|
||||
+ "WHERE (adminContact = :fkRepoId "
|
||||
+ "OR billingContact = :fkRepoId "
|
||||
+ "OR techContact = :fkRepoId "
|
||||
+ "OR registrantContact = :fkRepoId) "
|
||||
+ "AND deletionTime > :now";
|
||||
|
||||
// We have to use the native SQL query here because DomainHost table doesn't have its entity
|
||||
// class so we cannot reference its property like domainHost.hostRepoId in a JPQL query.
|
||||
private static final String HOST_LINKED_DOMAIN_QUERY =
|
||||
"SELECT d.repo_id FROM \"Domain\" d "
|
||||
+ "JOIN \"DomainHost\" dh ON dh.domain_repo_id = d.repo_id "
|
||||
+ "WHERE d.deletion_time > :now "
|
||||
+ "AND dh.host_repo_id = :fkRepoId";
|
||||
|
||||
/** Returns the full domain repoId in the format HEX-TLD for the specified long id and tld. */
|
||||
public static String createDomainRepoId(long repoId, String tld) {
|
||||
return createRepoId(repoId, Registry.get(tld).getRoidSuffix());
|
||||
@@ -135,7 +155,7 @@ public final class EppResourceUtils {
|
||||
useCache
|
||||
? ForeignKeyIndex.loadCached(clazz, ImmutableList.of(foreignKey), now)
|
||||
.getOrDefault(foreignKey, null)
|
||||
: ofy().load().type(ForeignKeyIndex.mapToFkiClass(clazz)).id(foreignKey).now();
|
||||
: ForeignKeyIndex.load(clazz, foreignKey, now);
|
||||
// The value of fki.getResourceKey() might be null for hard-deleted prober data.
|
||||
if (fki == null || isAtOrAfter(now, fki.getDeletionTime()) || fki.getResourceKey() == null) {
|
||||
return Optional.empty();
|
||||
@@ -143,7 +163,7 @@ public final class EppResourceUtils {
|
||||
T resource =
|
||||
useCache
|
||||
? EppResource.loadCached(fki.getResourceKey())
|
||||
: tm().maybeLoad(fki.getResourceKey()).orElse(null);
|
||||
: transactIfJpaTm(() -> tm().loadByKeyIfPresent(fki.getResourceKey()).orElse(null));
|
||||
if (resource == null || isAtOrAfter(now, resource.getDeletionTime())) {
|
||||
return Optional.empty();
|
||||
}
|
||||
@@ -364,21 +384,63 @@ public final class EppResourceUtils {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a query for domains or applications that reference a specified contact or host.
|
||||
* Returns a set of {@link VKey} for domains that reference a specified contact or host.
|
||||
*
|
||||
* <p>This is an eventually consistent query.
|
||||
* <p>This is an eventually consistent query if used for Datastore.
|
||||
*
|
||||
* @param key the referent key
|
||||
* @param now the logical time of the check
|
||||
* @param limit the maximum number of returned keys
|
||||
*/
|
||||
public static Query<DomainBase> queryForLinkedDomains(
|
||||
Key<? extends EppResource> key, DateTime now) {
|
||||
boolean isContactKey = key.getKind().equals(Key.getKind(ContactResource.class));
|
||||
return ofy()
|
||||
.load()
|
||||
.type(DomainBase.class)
|
||||
.filter(isContactKey ? "allContacts.contact" : "nsHosts", key)
|
||||
.filter("deletionTime >", now);
|
||||
public static ImmutableSet<VKey<DomainBase>> getLinkedDomainKeys(
|
||||
VKey<? extends EppResource> key, DateTime now, int limit) {
|
||||
checkArgument(
|
||||
key.getKind().equals(ContactResource.class) || key.getKind().equals(HostResource.class),
|
||||
"key must be either VKey<ContactResource> or VKey<HostResource>, but it is %s",
|
||||
key);
|
||||
boolean isContactKey = key.getKind().equals(ContactResource.class);
|
||||
if (tm().isOfy()) {
|
||||
return ofy()
|
||||
.load()
|
||||
.type(DomainBase.class)
|
||||
.filter(isContactKey ? "allContacts.contact" : "nsHosts", key.getOfyKey())
|
||||
.filter("deletionTime >", now)
|
||||
.limit(limit)
|
||||
.keys()
|
||||
.list()
|
||||
.stream()
|
||||
.map(DomainBase::createVKey)
|
||||
.collect(toImmutableSet());
|
||||
} else {
|
||||
return tm().transact(
|
||||
() -> {
|
||||
Query query;
|
||||
if (isContactKey) {
|
||||
query =
|
||||
jpaTm()
|
||||
.getEntityManager()
|
||||
.createQuery(CONTACT_LINKED_DOMAIN_QUERY, String.class)
|
||||
.setParameter("fkRepoId", key)
|
||||
.setParameter("now", now);
|
||||
} else {
|
||||
query =
|
||||
jpaTm()
|
||||
.getEntityManager()
|
||||
.createNativeQuery(HOST_LINKED_DOMAIN_QUERY)
|
||||
.setParameter("fkRepoId", key.getSqlKey())
|
||||
.setParameter("now", now.toDate());
|
||||
}
|
||||
return (ImmutableSet<VKey<DomainBase>>)
|
||||
query
|
||||
.setMaxResults(limit)
|
||||
.getResultStream()
|
||||
.map(
|
||||
repoId ->
|
||||
DomainBase.createVKey(
|
||||
Key.create(DomainBase.class, (String) repoId)))
|
||||
.collect(toImmutableSet());
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -389,8 +451,8 @@ public final class EppResourceUtils {
|
||||
* @param key the referent key
|
||||
* @param now the logical time of the check
|
||||
*/
|
||||
public static boolean isLinked(Key<? extends EppResource> key, DateTime now) {
|
||||
return queryForLinkedDomains(key, now).limit(1).count() > 0;
|
||||
public static boolean isLinked(VKey<? extends EppResource> key, DateTime now) {
|
||||
return getLinkedDomainKeys(key, now, 1).size() > 0;
|
||||
}
|
||||
|
||||
private EppResourceUtils() {}
|
||||
|
||||
@@ -54,9 +54,37 @@ public abstract class ImmutableObject implements Cloneable {
|
||||
@Target(FIELD)
|
||||
public @interface DoNotHydrate {}
|
||||
|
||||
@Ignore
|
||||
@XmlTransient
|
||||
Integer hashCode;
|
||||
/**
|
||||
* Indicates that the field should be ignored when comparing an object in the datastore to the
|
||||
* corresponding object in Cloud SQL.
|
||||
*/
|
||||
@Documented
|
||||
@Retention(RUNTIME)
|
||||
@Target(FIELD)
|
||||
public @interface DoNotCompare {}
|
||||
|
||||
/**
|
||||
* Indicates that the field stores a null value to indicate an empty set. This is also used in
|
||||
* object comparison.
|
||||
*/
|
||||
@Documented
|
||||
@Retention(RUNTIME)
|
||||
@Target(FIELD)
|
||||
public @interface EmptySetToNull {}
|
||||
|
||||
/**
|
||||
* Indicates that the field does not take part in the immutability contract.
|
||||
*
|
||||
* <p>Certain fields currently get modified by hibernate and there is nothing we can do about it.
|
||||
* As well as violating immutability, this breaks hashing and equality comparisons, so we mark
|
||||
* these fields with this annotation to exclude them from most operations.
|
||||
*/
|
||||
@Documented
|
||||
@Retention(RUNTIME)
|
||||
@Target(FIELD)
|
||||
public @interface Insignificant {}
|
||||
|
||||
@Ignore @XmlTransient protected Integer hashCode;
|
||||
|
||||
private boolean equalsImmutableObject(ImmutableObject other) {
|
||||
return getClass().equals(other.getClass())
|
||||
@@ -71,7 +99,14 @@ public abstract class ImmutableObject implements Cloneable {
|
||||
* <p>Isolated into a method so that derived classes can override it.
|
||||
*/
|
||||
protected Map<Field, Object> getSignificantFields() {
|
||||
return ModelUtils.getFieldValues(this);
|
||||
// Can't use streams or ImmutableMap because we can have null values.
|
||||
Map<Field, Object> result = new LinkedHashMap();
|
||||
for (Map.Entry<Field, Object> entry : ModelUtils.getFieldValues(this).entrySet()) {
|
||||
if (!entry.getKey().isAnnotationPresent(Insignificant.class)) {
|
||||
result.put(entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
||||
@@ -22,12 +22,17 @@ import javax.annotation.Nullable;
|
||||
import org.joda.time.DateTime;
|
||||
|
||||
/**
|
||||
* A timestamp that auto-updates on each save to Datastore.
|
||||
* A timestamp that auto-updates on each save to Datastore/Cloud SQL.
|
||||
*
|
||||
* @see UpdateAutoTimestampTranslatorFactory
|
||||
*/
|
||||
public class UpdateAutoTimestamp extends ImmutableObject {
|
||||
|
||||
// When set to true, database converters/translators should do tha auto update. When set to
|
||||
// false, auto update should be suspended (this exists to allow us to preserve the original value
|
||||
// during a replay).
|
||||
private static ThreadLocal<Boolean> autoUpdateEnabled = ThreadLocal.withInitial(() -> true);
|
||||
|
||||
DateTime timestamp;
|
||||
|
||||
/** Returns the timestamp, or {@code START_OF_TIME} if it's null. */
|
||||
@@ -40,4 +45,30 @@ public class UpdateAutoTimestamp extends ImmutableObject {
|
||||
instance.timestamp = timestamp;
|
||||
return instance;
|
||||
}
|
||||
|
||||
// TODO(b/175610935): Remove the auto-update disabling code below after migration.
|
||||
|
||||
/** Class to allow us to safely disable auto-update in a try-with-resources block. */
|
||||
public static class DisableAutoUpdateResource implements AutoCloseable {
|
||||
DisableAutoUpdateResource() {
|
||||
autoUpdateEnabled.set(false);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
autoUpdateEnabled.set(true);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Resturns a resource that disables auto-updates on all {@link UpdateAutoTimestamp}s in the
|
||||
* current thread, suitable for use with in a try-with-resources block.
|
||||
*/
|
||||
public static DisableAutoUpdateResource disableAutoUpdate() {
|
||||
return new DisableAutoUpdateResource();
|
||||
}
|
||||
|
||||
public static boolean autoUpdateEnabled() {
|
||||
return autoUpdateEnabled.get();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,6 +46,8 @@ import google.registry.model.domain.rgp.GracePeriodStatus;
|
||||
import google.registry.model.domain.token.AllocationToken;
|
||||
import google.registry.model.reporting.HistoryEntry;
|
||||
import google.registry.model.transfer.TransferData.TransferServerApproveEntity;
|
||||
import google.registry.persistence.BillingVKey.BillingEventVKey;
|
||||
import google.registry.persistence.BillingVKey.BillingRecurrenceVKey;
|
||||
import google.registry.persistence.VKey;
|
||||
import google.registry.persistence.WithLongVKey;
|
||||
import google.registry.schema.replay.DatastoreAndSqlEntity;
|
||||
@@ -572,8 +574,7 @@ public abstract class BillingEvent extends ImmutableObject
|
||||
* <p>Although the type is {@link Key} the name "ref" is preserved for historical reasons.
|
||||
*/
|
||||
@IgnoreSave(IfNull.class)
|
||||
@Column(name = "billing_event_id")
|
||||
VKey<BillingEvent.OneTime> refOneTime = null;
|
||||
BillingEventVKey refOneTime = null;
|
||||
|
||||
/**
|
||||
* The recurring billing event to cancel, or null for non-autorenew cancellations.
|
||||
@@ -581,15 +582,14 @@ public abstract class BillingEvent extends ImmutableObject
|
||||
* <p>Although the type is {@link Key} the name "ref" is preserved for historical reasons.
|
||||
*/
|
||||
@IgnoreSave(IfNull.class)
|
||||
@Column(name = "billing_recurrence_id")
|
||||
VKey<BillingEvent.Recurring> refRecurring = null;
|
||||
BillingRecurrenceVKey refRecurring = null;
|
||||
|
||||
public DateTime getBillingTime() {
|
||||
return billingTime;
|
||||
}
|
||||
|
||||
public VKey<? extends BillingEvent> getEventKey() {
|
||||
return firstNonNull(refOneTime, refRecurring);
|
||||
return firstNonNull(refOneTime, refRecurring).createVKey();
|
||||
}
|
||||
|
||||
/** The mapping from billable grace period types to originating billing event reasons. */
|
||||
@@ -656,12 +656,12 @@ public abstract class BillingEvent extends ImmutableObject
|
||||
}
|
||||
|
||||
public Builder setOneTimeEventKey(VKey<BillingEvent.OneTime> eventKey) {
|
||||
getInstance().refOneTime = eventKey;
|
||||
getInstance().refOneTime = BillingEventVKey.create(eventKey);
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setRecurringEventKey(VKey<BillingEvent.Recurring> eventKey) {
|
||||
getInstance().refRecurring = eventKey;
|
||||
getInstance().refRecurring = BillingRecurrenceVKey.create(eventKey);
|
||||
return this;
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,159 @@
|
||||
// Copyright 2021 The Nomulus Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package google.registry.model.common;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
import static google.registry.config.RegistryConfig.getSingletonCacheRefreshDuration;
|
||||
import static google.registry.model.common.EntityGroupRoot.getCrossTldKey;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.ofyTm;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
|
||||
import com.google.common.cache.CacheBuilder;
|
||||
import com.google.common.cache.CacheLoader;
|
||||
import com.google.common.cache.LoadingCache;
|
||||
import com.google.common.collect.ImmutableSortedMap;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.annotation.Embed;
|
||||
import com.googlecode.objectify.annotation.Entity;
|
||||
import com.googlecode.objectify.annotation.Id;
|
||||
import com.googlecode.objectify.annotation.Mapify;
|
||||
import com.googlecode.objectify.annotation.Parent;
|
||||
import google.registry.model.ImmutableObject;
|
||||
import google.registry.model.UpdateAutoTimestamp;
|
||||
import google.registry.model.common.TimedTransitionProperty.TimeMapper;
|
||||
import google.registry.model.common.TimedTransitionProperty.TimedTransition;
|
||||
import google.registry.model.registry.label.PremiumList;
|
||||
import google.registry.model.registry.label.ReservedList;
|
||||
import google.registry.model.smd.SignedMarkRevocationList;
|
||||
import google.registry.persistence.VKey;
|
||||
import google.registry.schema.replay.DatastoreOnlyEntity;
|
||||
import java.util.Optional;
|
||||
import javax.annotation.concurrent.Immutable;
|
||||
import org.joda.time.DateTime;
|
||||
|
||||
@Entity
|
||||
@Immutable
|
||||
public class DatabaseTransitionSchedule extends ImmutableObject implements DatastoreOnlyEntity {
|
||||
|
||||
/**
|
||||
* The name of the database to be treated as the primary database. The first entry in the schedule
|
||||
* will always be Datastore.
|
||||
*/
|
||||
public enum PrimaryDatabase {
|
||||
CLOUD_SQL,
|
||||
DATASTORE
|
||||
}
|
||||
|
||||
/** The id of the transition schedule. */
|
||||
public enum TransitionId {
|
||||
/** The schedule for the migration of {@link PremiumList} and {@link ReservedList}. */
|
||||
DOMAIN_LABEL_LISTS,
|
||||
/** The schedule for the migration of the {@link SignedMarkRevocationList} entity. */
|
||||
SIGNED_MARK_REVOCATION_LIST,
|
||||
}
|
||||
|
||||
/**
|
||||
* The transition to a specified primary database at a specific point in time, for use in a
|
||||
* TimedTransitionProperty.
|
||||
*/
|
||||
@Embed
|
||||
public static class PrimaryDatabaseTransition extends TimedTransition<PrimaryDatabase> {
|
||||
private PrimaryDatabase primaryDatabase;
|
||||
|
||||
@Override
|
||||
protected PrimaryDatabase getValue() {
|
||||
return primaryDatabase;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void setValue(PrimaryDatabase primaryDatabase) {
|
||||
this.primaryDatabase = primaryDatabase;
|
||||
}
|
||||
}
|
||||
|
||||
@Parent Key<EntityGroupRoot> parent = getCrossTldKey();
|
||||
|
||||
@Id String transitionId;
|
||||
|
||||
/** An automatically managed timestamp of when this schedule was last written to Datastore. */
|
||||
UpdateAutoTimestamp lastUpdateTime = UpdateAutoTimestamp.create(null);
|
||||
|
||||
/** A property that tracks the primary database for a dual-read/dual-write database migration. */
|
||||
@Mapify(TimeMapper.class)
|
||||
TimedTransitionProperty<PrimaryDatabase, PrimaryDatabaseTransition> databaseTransitions =
|
||||
TimedTransitionProperty.forMapify(PrimaryDatabase.DATASTORE, PrimaryDatabaseTransition.class);
|
||||
|
||||
/** A cache that loads the {@link DatabaseTransitionSchedule} for a given id. */
|
||||
private static final LoadingCache<TransitionId, Optional<DatabaseTransitionSchedule>> CACHE =
|
||||
CacheBuilder.newBuilder()
|
||||
.expireAfterWrite(
|
||||
java.time.Duration.ofMillis(getSingletonCacheRefreshDuration().getMillis()))
|
||||
.build(
|
||||
new CacheLoader<TransitionId, Optional<DatabaseTransitionSchedule>>() {
|
||||
@Override
|
||||
public Optional<DatabaseTransitionSchedule> load(TransitionId transitionId) {
|
||||
return DatabaseTransitionSchedule.get(transitionId);
|
||||
}
|
||||
});
|
||||
|
||||
public static DatabaseTransitionSchedule create(
|
||||
TransitionId transitionId,
|
||||
TimedTransitionProperty<PrimaryDatabase, PrimaryDatabaseTransition> databaseTransitions) {
|
||||
checkNotNull(transitionId, "Id cannot be null");
|
||||
checkNotNull(databaseTransitions, "databaseTransitions cannot be null");
|
||||
databaseTransitions.checkValidity();
|
||||
DatabaseTransitionSchedule instance = new DatabaseTransitionSchedule();
|
||||
instance.transitionId = transitionId.name();
|
||||
instance.databaseTransitions = databaseTransitions;
|
||||
return instance;
|
||||
}
|
||||
|
||||
/** Returns the database that is indicated as primary at the given time. */
|
||||
public PrimaryDatabase getPrimaryDatabase() {
|
||||
return databaseTransitions.getValueAtTime(tm().getTransactionTime());
|
||||
}
|
||||
|
||||
/** Returns the database transitions as a map of start time to primary database. */
|
||||
public ImmutableSortedMap<DateTime, PrimaryDatabase> getDatabaseTransitions() {
|
||||
return databaseTransitions.toValueMap();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the current cached schedule for the given id.
|
||||
*
|
||||
* <p>WARNING: The schedule returned by this method could be up to 10 minutes out of date.
|
||||
*/
|
||||
public static Optional<DatabaseTransitionSchedule> getCached(TransitionId id) {
|
||||
return CACHE.getUnchecked(id);
|
||||
}
|
||||
|
||||
/** Returns the schedule for a given id. */
|
||||
public static Optional<DatabaseTransitionSchedule> get(TransitionId transitionId) {
|
||||
VKey<DatabaseTransitionSchedule> key =
|
||||
VKey.create(
|
||||
DatabaseTransitionSchedule.class,
|
||||
transitionId,
|
||||
Key.create(getCrossTldKey(), DatabaseTransitionSchedule.class, transitionId.name()));
|
||||
|
||||
return ofyTm().transact(() -> ofyTm().loadByKeyIfPresent(key));
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.format(
|
||||
"%s(last updated at %s): %s",
|
||||
transitionId, lastUpdateTime.getTimestamp(), databaseTransitions.toValueMap());
|
||||
}
|
||||
}
|
||||
@@ -75,7 +75,9 @@ public class DomainBase extends DomainContent
|
||||
}
|
||||
|
||||
@ElementCollection
|
||||
@JoinTable(name = "DomainHost")
|
||||
@JoinTable(
|
||||
name = "DomainHost",
|
||||
indexes = {@Index(columnList = "domain_repo_id,host_repo_id", unique = true)})
|
||||
@Access(AccessType.PROPERTY)
|
||||
@Column(name = "host_repo_id")
|
||||
public Set<VKey<HostResource>> getNsHosts() {
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
package google.registry.model.domain;
|
||||
|
||||
import static com.google.common.base.MoreObjects.firstNonNull;
|
||||
import static com.google.common.base.Preconditions.checkArgument;
|
||||
import static com.google.common.base.Strings.emptyToNull;
|
||||
import static com.google.common.collect.ImmutableSet.toImmutableSet;
|
||||
@@ -65,6 +66,7 @@ import google.registry.model.transfer.DomainTransferData;
|
||||
import google.registry.model.transfer.TransferStatus;
|
||||
import google.registry.persistence.VKey;
|
||||
import google.registry.util.CollectionUtils;
|
||||
import google.registry.util.DateTimeUtils;
|
||||
import java.util.HashSet;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
@@ -132,7 +134,7 @@ public class DomainContent extends EppResource
|
||||
@Index String tld;
|
||||
|
||||
/** References to hosts that are the nameservers for the domain. */
|
||||
@Index @Transient Set<VKey<HostResource>> nsHosts;
|
||||
@EmptySetToNull @Index @Transient Set<VKey<HostResource>> nsHosts;
|
||||
|
||||
/**
|
||||
* The union of the contacts visible via {@link #getContacts} and {@link #getRegistrant}.
|
||||
@@ -283,9 +285,10 @@ public class DomainContent extends EppResource
|
||||
/**
|
||||
* When the domain's autorenewal status will expire.
|
||||
*
|
||||
* <p>This will be null for the vast majority of domains because all domains autorenew
|
||||
* indefinitely by default and autorenew can only be countermanded by administrators, typically
|
||||
* for reasons of the URS process or termination of a registrar for nonpayment.
|
||||
* <p>This will be {@link DateTimeUtils#END_OF_TIME} for the vast majority of domains because all
|
||||
* domains autorenew indefinitely by default and autorenew can only be countermanded by
|
||||
* administrators, typically for reasons of the URS process or termination of a registrar for
|
||||
* nonpayment.
|
||||
*
|
||||
* <p>When a domain is scheduled to not autorenew, this field is set to the current value of its
|
||||
* {@link #registrationExpirationTime}, after which point the next invocation of a periodic
|
||||
@@ -294,10 +297,16 @@ public class DomainContent extends EppResource
|
||||
* difference domains that have reached their life and must be deleted now, and domains that
|
||||
* happen to be in the autorenew grace period now but should be deleted in roughly a year.
|
||||
*/
|
||||
@Nullable @Index DateTime autorenewEndTime;
|
||||
@Index DateTime autorenewEndTime;
|
||||
|
||||
@OnLoad
|
||||
void load() {
|
||||
// Back fill with correct END_OF_TIME sentinel value.
|
||||
// TODO(mcilwain): Remove this once back-filling is complete.
|
||||
if (autorenewEndTime == null) {
|
||||
autorenewEndTime = END_OF_TIME;
|
||||
}
|
||||
|
||||
// Reconstitute all of the contacts so that they have VKeys.
|
||||
allContacts =
|
||||
allContacts.stream().map(DesignatedContact::reconstitute).collect(toImmutableSet());
|
||||
@@ -319,10 +328,19 @@ public class DomainContent extends EppResource
|
||||
autorenewPollMessageHistoryId = getHistoryId(autorenewPollMessage);
|
||||
autorenewBillingEventHistoryId = getHistoryId(autorenewBillingEvent);
|
||||
deletePollMessageHistoryId = getHistoryId(deletePollMessage);
|
||||
|
||||
// Fix PollMessage VKeys.
|
||||
autorenewPollMessage = PollMessage.Autorenew.convertVKey(autorenewPollMessage);
|
||||
deletePollMessage = PollMessage.OneTime.convertVKey(deletePollMessage);
|
||||
|
||||
dsData =
|
||||
nullToEmptyImmutableCopy(dsData).stream()
|
||||
.map(dsData -> dsData.cloneWithDomainRepoId(getRepoId()))
|
||||
.collect(toImmutableSet());
|
||||
|
||||
if (transferData != null) {
|
||||
transferData.convertVKeys();
|
||||
}
|
||||
}
|
||||
|
||||
@PostLoad
|
||||
@@ -397,8 +415,20 @@ public class DomainContent extends EppResource
|
||||
return smdId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the autorenew end time if there is one, otherwise empty.
|
||||
*
|
||||
* <p>Note that {@link DateTimeUtils#END_OF_TIME} is used as a sentinel value in the database
|
||||
* representation to signify that autorenew doesn't end, and is mapped to empty here for the
|
||||
* purposes of more legible business logic.
|
||||
*/
|
||||
public Optional<DateTime> getAutorenewEndTime() {
|
||||
return Optional.ofNullable(autorenewEndTime);
|
||||
// TODO(mcilwain): Remove null handling for autorenewEndTime once data migration away from null
|
||||
// is complete.
|
||||
return Optional.ofNullable(
|
||||
(autorenewEndTime == null || autorenewEndTime.equals(END_OF_TIME))
|
||||
? null
|
||||
: autorenewEndTime);
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -773,6 +803,8 @@ public class DomainContent extends EppResource
|
||||
} else { // There are nameservers, so make sure INACTIVE isn't there.
|
||||
removeStatusValue(StatusValue.INACTIVE);
|
||||
}
|
||||
// If there is no autorenew end time, set it to END_OF_TIME.
|
||||
instance.autorenewEndTime = firstNonNull(getInstance().autorenewEndTime, END_OF_TIME);
|
||||
|
||||
checkArgumentNotNull(emptyToNull(instance.fullyQualifiedDomainName), "Missing domainName");
|
||||
if (instance.getRegistrant() == null
|
||||
@@ -952,8 +984,15 @@ public class DomainContent extends EppResource
|
||||
return thisCastToDerived();
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the autorenew end time, or clears it if empty is passed.
|
||||
*
|
||||
* <p>Note that {@link DateTimeUtils#END_OF_TIME} is used as a sentinel value in the database
|
||||
* representation to signify that autorenew doesn't end, and is mapped to empty here for the
|
||||
* purposes of more legible business logic.
|
||||
*/
|
||||
public B setAutorenewEndTime(Optional<DateTime> autorenewEndTime) {
|
||||
getInstance().autorenewEndTime = autorenewEndTime.orElse(null);
|
||||
getInstance().autorenewEndTime = autorenewEndTime.orElse(END_OF_TIME);
|
||||
return thisCastToDerived();
|
||||
}
|
||||
|
||||
|
||||
@@ -96,7 +96,15 @@ public class DomainHistory extends HistoryEntry implements SqlEntity {
|
||||
// TODO(b/166776754): Investigate if we can reuse domainContent.nsHosts for storing host keys.
|
||||
@Ignore
|
||||
@ElementCollection
|
||||
@JoinTable(name = "DomainHistoryHost")
|
||||
@JoinTable(
|
||||
name = "DomainHistoryHost",
|
||||
indexes = {
|
||||
@Index(
|
||||
columnList =
|
||||
"domain_history_history_revision_id,domain_history_domain_repo_id,host_repo_id",
|
||||
unique = true),
|
||||
})
|
||||
@ImmutableObject.EmptySetToNull
|
||||
@Column(name = "host_repo_id")
|
||||
Set<VKey<HostResource>> nsHosts;
|
||||
|
||||
@@ -173,7 +181,9 @@ public class DomainHistory extends HistoryEntry implements SqlEntity {
|
||||
* #getDomainTransactionRecords()}.
|
||||
*/
|
||||
@Access(AccessType.PROPERTY)
|
||||
@OneToMany(cascade = {CascadeType.ALL})
|
||||
@OneToMany(
|
||||
cascade = {CascadeType.ALL},
|
||||
fetch = FetchType.EAGER)
|
||||
@JoinColumn(name = "historyRevisionId", referencedColumnName = "historyRevisionId")
|
||||
@JoinColumn(name = "domainRepoId", referencedColumnName = "domainRepoId")
|
||||
@SuppressWarnings("unused")
|
||||
|
||||
@@ -23,6 +23,8 @@ import google.registry.model.billing.BillingEvent;
|
||||
import google.registry.model.billing.BillingEvent.Recurring;
|
||||
import google.registry.model.domain.rgp.GracePeriodStatus;
|
||||
import google.registry.model.ofy.ObjectifyService;
|
||||
import google.registry.persistence.BillingVKey.BillingEventVKey;
|
||||
import google.registry.persistence.BillingVKey.BillingRecurrenceVKey;
|
||||
import google.registry.persistence.VKey;
|
||||
import google.registry.schema.replay.DatastoreAndSqlEntity;
|
||||
import javax.annotation.Nullable;
|
||||
@@ -82,10 +84,8 @@ public class GracePeriod extends GracePeriodBase implements DatastoreAndSqlEntit
|
||||
instance.domainRepoId = checkArgumentNotNull(domainRepoId);
|
||||
instance.expirationTime = checkArgumentNotNull(expirationTime);
|
||||
instance.clientId = checkArgumentNotNull(clientId);
|
||||
instance.billingEventOneTime = billingEventOneTime;
|
||||
instance.billingEventOneTimeHistoryId = DomainBase.getHistoryId(billingEventOneTime);
|
||||
instance.billingEventRecurring = billingEventRecurring;
|
||||
instance.billingEventRecurringHistoryId = DomainBase.getHistoryId(billingEventRecurring);
|
||||
instance.billingEventOneTime = BillingEventVKey.create(billingEventOneTime);
|
||||
instance.billingEventRecurring = BillingRecurrenceVKey.create(billingEventRecurring);
|
||||
return instance;
|
||||
}
|
||||
|
||||
@@ -178,7 +178,6 @@ public class GracePeriod extends GracePeriodBase implements DatastoreAndSqlEntit
|
||||
public GracePeriod cloneAfterOfyLoad(String domainRepoId) {
|
||||
GracePeriod clone = clone(this);
|
||||
clone.domainRepoId = checkArgumentNotNull(domainRepoId);
|
||||
clone.restoreHistoryIds();
|
||||
return clone;
|
||||
}
|
||||
|
||||
@@ -190,20 +189,7 @@ public class GracePeriod extends GracePeriodBase implements DatastoreAndSqlEntit
|
||||
*/
|
||||
public GracePeriod cloneWithRecurringBillingEvent(VKey<BillingEvent.Recurring> recurring) {
|
||||
GracePeriod clone = clone(this);
|
||||
clone.billingEventRecurring = recurring;
|
||||
return clone;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a clone of this {@link GracePeriod} with prepopulated {@link #gracePeriodId} generated
|
||||
* by {@link ObjectifyService#allocateId()}.
|
||||
*
|
||||
* <p>TODO(shicong): Figure out how to generate the id only when the entity is used for Cloud SQL.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
public GracePeriod cloneWithPrepopulatedId() {
|
||||
GracePeriod clone = clone(this);
|
||||
clone.gracePeriodId = ObjectifyService.allocateId();
|
||||
clone.billingEventRecurring = BillingRecurrenceVKey.create(recurring);
|
||||
return clone;
|
||||
}
|
||||
|
||||
@@ -232,9 +218,7 @@ public class GracePeriod extends GracePeriodBase implements DatastoreAndSqlEntit
|
||||
instance.expirationTime = gracePeriod.expirationTime;
|
||||
instance.clientId = gracePeriod.clientId;
|
||||
instance.billingEventOneTime = gracePeriod.billingEventOneTime;
|
||||
instance.billingEventOneTimeHistoryId = gracePeriod.billingEventOneTimeHistoryId;
|
||||
instance.billingEventRecurring = gracePeriod.billingEventRecurring;
|
||||
instance.billingEventRecurringHistoryId = gracePeriod.billingEventRecurringHistoryId;
|
||||
return instance;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,18 +14,14 @@
|
||||
|
||||
package google.registry.model.domain;
|
||||
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.annotation.Embed;
|
||||
import com.googlecode.objectify.annotation.Ignore;
|
||||
import google.registry.model.ImmutableObject;
|
||||
import google.registry.model.ModelUtils;
|
||||
import google.registry.model.billing.BillingEvent;
|
||||
import google.registry.model.billing.BillingEvent.OneTime;
|
||||
import google.registry.model.domain.rgp.GracePeriodStatus;
|
||||
import google.registry.persistence.BillingVKey.BillingEventVKey;
|
||||
import google.registry.persistence.BillingVKey.BillingRecurrenceVKey;
|
||||
import google.registry.persistence.VKey;
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.Map;
|
||||
import javax.persistence.Access;
|
||||
import javax.persistence.AccessType;
|
||||
import javax.persistence.Column;
|
||||
@@ -68,24 +64,16 @@ public class GracePeriodBase extends ImmutableObject {
|
||||
* billingEventRecurring}) or for redemption grace periods (since deletes have no cost).
|
||||
*/
|
||||
// NB: Would @IgnoreSave(IfNull.class), but not allowed for @Embed collections.
|
||||
@Column(name = "billing_event_id")
|
||||
VKey<OneTime> billingEventOneTime = null;
|
||||
|
||||
@Ignore
|
||||
@Column(name = "billing_event_history_id")
|
||||
Long billingEventOneTimeHistoryId;
|
||||
@Access(AccessType.FIELD)
|
||||
BillingEventVKey billingEventOneTime = null;
|
||||
|
||||
/**
|
||||
* The recurring billing event corresponding to the action that triggered this grace period, if
|
||||
* applicable - i.e. if the action was an autorenew - or null in all other cases.
|
||||
*/
|
||||
// NB: Would @IgnoreSave(IfNull.class), but not allowed for @Embed collections.
|
||||
@Column(name = "billing_recurrence_id")
|
||||
VKey<BillingEvent.Recurring> billingEventRecurring = null;
|
||||
|
||||
@Ignore
|
||||
@Column(name = "billing_recurrence_history_id")
|
||||
Long billingEventRecurringHistoryId;
|
||||
@Access(AccessType.FIELD)
|
||||
BillingRecurrenceVKey billingEventRecurring = null;
|
||||
|
||||
public long getGracePeriodId() {
|
||||
return gracePeriodId;
|
||||
@@ -123,8 +111,7 @@ public class GracePeriodBase extends ImmutableObject {
|
||||
* period is not AUTO_RENEW.
|
||||
*/
|
||||
public VKey<BillingEvent.OneTime> getOneTimeBillingEvent() {
|
||||
restoreOfyKeys();
|
||||
return billingEventOneTime;
|
||||
return billingEventOneTime == null ? null : billingEventOneTime.createVKey();
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -132,63 +119,6 @@ public class GracePeriodBase extends ImmutableObject {
|
||||
* period is AUTO_RENEW.
|
||||
*/
|
||||
public VKey<BillingEvent.Recurring> getRecurringBillingEvent() {
|
||||
restoreOfyKeys();
|
||||
return billingEventRecurring;
|
||||
}
|
||||
|
||||
/**
|
||||
* Restores history ids for composite VKeys after a load from datastore.
|
||||
*
|
||||
* <p>For use by DomainContent.load() ONLY.
|
||||
*/
|
||||
protected void restoreHistoryIds() {
|
||||
billingEventOneTimeHistoryId = DomainBase.getHistoryId(billingEventOneTime);
|
||||
billingEventRecurringHistoryId = DomainBase.getHistoryId(billingEventRecurring);
|
||||
}
|
||||
|
||||
/**
|
||||
* Override {@link ImmutableObject#getSignificantFields()} to exclude "id", which breaks equality
|
||||
* testing in the unit tests.
|
||||
*/
|
||||
@Override
|
||||
protected Map<Field, Object> getSignificantFields() {
|
||||
restoreOfyKeys();
|
||||
// Can't use streams or ImmutableMap because we can have null values.
|
||||
Map<Field, Object> result = new LinkedHashMap();
|
||||
for (Map.Entry<Field, Object> entry : ModelUtils.getFieldValues(this).entrySet()) {
|
||||
if (!entry.getKey().getName().equals("id")) {
|
||||
result.put(entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Restores Ofy keys in the billing events.
|
||||
*
|
||||
* <p>This must be called by all methods that access the one time or recurring billing event keys.
|
||||
* When the billing event keys are loaded from SQL, they are loaded as asymmetric keys because the
|
||||
* database columns that we load them from do not contain all of the information necessary to
|
||||
* reconsitute the Ofy side of the key. In other cases, we restore the Ofy key during the
|
||||
* hibernate {@link javax.persistence.PostLoad} method from the other fields of the object, but we
|
||||
* have been unable to make this work with hibernate's internal persistence model in this case
|
||||
* because the {@link GracePeriod}'s hash code is evaluated prior to these calls, and would be
|
||||
* invalidated by changing the fields.
|
||||
*/
|
||||
private final synchronized void restoreOfyKeys() {
|
||||
if (billingEventOneTime != null && !billingEventOneTime.maybeGetOfyKey().isPresent()) {
|
||||
billingEventOneTime =
|
||||
DomainBase.restoreOfyFrom(
|
||||
Key.create(DomainBase.class, domainRepoId),
|
||||
billingEventOneTime,
|
||||
billingEventOneTimeHistoryId);
|
||||
}
|
||||
if (billingEventRecurring != null && !billingEventRecurring.maybeGetOfyKey().isPresent()) {
|
||||
billingEventRecurring =
|
||||
DomainBase.restoreOfyFrom(
|
||||
Key.create(DomainBase.class, domainRepoId),
|
||||
billingEventRecurring,
|
||||
billingEventRecurringHistoryId);
|
||||
}
|
||||
return billingEventRecurring == null ? null : billingEventRecurring.createVKey();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,7 +35,7 @@ import javax.xml.bind.annotation.XmlType;
|
||||
*
|
||||
* @see <a href="http://tools.ietf.org/html/rfc5910">RFC 5910</a>
|
||||
* @see <a href="http://tools.ietf.org/html/rfc4034">RFC 4034</a>
|
||||
* <p>TODO(shicong): Rename this class to DomainDsData.
|
||||
* <p>TODO(b/177567432): Rename this class to DomainDsData.
|
||||
*/
|
||||
@Embed
|
||||
@XmlType(name = "dsData")
|
||||
|
||||
@@ -111,9 +111,9 @@ public class AllocationToken extends BackupGroupRoot implements Buildable, Datas
|
||||
@Nullable
|
||||
@Index
|
||||
@AttributeOverrides({
|
||||
@AttributeOverride(name = "domainRepoId", column = @Column(name = "redemption_domain_repo_id")),
|
||||
@AttributeOverride(name = "repoId", column = @Column(name = "redemption_domain_repo_id")),
|
||||
@AttributeOverride(
|
||||
name = "domainHistoryId",
|
||||
name = "historyRevisionId",
|
||||
column = @Column(name = "redemption_domain_history_id"))
|
||||
})
|
||||
DomainHistoryVKey redemptionHistoryEntry;
|
||||
@@ -192,8 +192,9 @@ public class AllocationToken extends BackupGroupRoot implements Buildable, Datas
|
||||
return token;
|
||||
}
|
||||
|
||||
public Optional<VKey<HistoryEntry>> getRedemptionHistoryEntry() {
|
||||
return Optional.ofNullable(redemptionHistoryEntry);
|
||||
public Optional<VKey<? extends HistoryEntry>> getRedemptionHistoryEntry() {
|
||||
return Optional.ofNullable(
|
||||
redemptionHistoryEntry == null ? null : redemptionHistoryEntry.createDomainHistoryVKey());
|
||||
}
|
||||
|
||||
public boolean isRedeemed() {
|
||||
@@ -291,9 +292,10 @@ public class AllocationToken extends BackupGroupRoot implements Buildable, Datas
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setRedemptionHistoryEntry(DomainHistoryVKey redemptionHistoryEntry) {
|
||||
public Builder setRedemptionHistoryEntry(VKey<? extends HistoryEntry> redemptionHistoryEntry) {
|
||||
checkArgumentNotNull(redemptionHistoryEntry, "Redemption history entry must not be null");
|
||||
getInstance().redemptionHistoryEntry =
|
||||
checkArgumentNotNull(redemptionHistoryEntry, "Redemption history entry must not be null");
|
||||
DomainHistoryVKey.create(redemptionHistoryEntry.getOfyKey());
|
||||
return this;
|
||||
}
|
||||
|
||||
|
||||
@@ -35,6 +35,7 @@ import javax.persistence.Embeddable;
|
||||
import javax.persistence.MappedSuperclass;
|
||||
import javax.persistence.PostLoad;
|
||||
import javax.persistence.Transient;
|
||||
import javax.xml.bind.Unmarshaller;
|
||||
import javax.xml.bind.annotation.XmlElement;
|
||||
import javax.xml.bind.annotation.XmlTransient;
|
||||
import javax.xml.bind.annotation.adapters.CollapsedStringAdapter;
|
||||
@@ -58,8 +59,8 @@ import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
|
||||
public class Address extends ImmutableObject implements Jsonifiable {
|
||||
|
||||
/** The schema validation will enforce that this has 3 lines at most. */
|
||||
// TODO(shicong): Remove this field after migration. We need to figure out how to generate same
|
||||
// XML from streetLine[1,2,3].
|
||||
// TODO(b/177569726): Remove this field after migration. We need to figure out how to generate
|
||||
// same XML from streetLine[1,2,3].
|
||||
@XmlJavaTypeAdapter(NormalizedStringAdapter.class)
|
||||
@Transient
|
||||
List<String> street;
|
||||
@@ -174,15 +175,10 @@ public class Address extends ImmutableObject implements Jsonifiable {
|
||||
* entity from Datastore.
|
||||
*
|
||||
* <p>This callback method is used by Objectify to set streetLine[1,2,3] fields as they are not
|
||||
* persisted in the Datastore. TODO(shicong): Delete this method after database migration.
|
||||
* persisted in the Datastore.
|
||||
*/
|
||||
void onLoad(@AlsoLoad("street") List<String> street) {
|
||||
if (street == null || street.size() == 0) {
|
||||
return;
|
||||
}
|
||||
streetLine1 = street.get(0);
|
||||
streetLine2 = street.size() >= 2 ? street.get(1) : null;
|
||||
streetLine3 = street.size() >= 3 ? street.get(2) : null;
|
||||
mapStreetListToIndividualFields(street);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -202,4 +198,23 @@ public class Address extends ImmutableObject implements Jsonifiable {
|
||||
.filter(Objects::nonNull)
|
||||
.collect(toImmutableList());
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets {@link #streetLine1}, {@link #streetLine2} and {@link #streetLine3} when the entity is
|
||||
* reconstructed from XML message.
|
||||
*
|
||||
* <p>This is a callback function that JAXB invokes after unmarshalling the XML message.
|
||||
*/
|
||||
void afterUnmarshal(Unmarshaller unmarshaller, Object parent) {
|
||||
mapStreetListToIndividualFields(street);
|
||||
}
|
||||
|
||||
private void mapStreetListToIndividualFields(List<String> street) {
|
||||
if (street == null || street.size() == 0) {
|
||||
return;
|
||||
}
|
||||
streetLine1 = street.get(0);
|
||||
streetLine2 = street.size() >= 2 ? street.get(1) : null;
|
||||
streetLine3 = street.size() >= 3 ? street.get(2) : null;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,9 +15,11 @@
|
||||
package google.registry.model.index;
|
||||
|
||||
import static com.google.common.collect.ImmutableList.toImmutableList;
|
||||
import static com.google.common.collect.ImmutableMap.toImmutableMap;
|
||||
import static google.registry.config.RegistryConfig.getEppResourceCachingDuration;
|
||||
import static google.registry.config.RegistryConfig.getEppResourceMaxCachedEntries;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
import static google.registry.util.TypeUtils.instantiate;
|
||||
|
||||
@@ -29,6 +31,7 @@ import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.Maps;
|
||||
import com.google.common.collect.Multimaps;
|
||||
import com.google.common.collect.Streams;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.annotation.Entity;
|
||||
@@ -44,6 +47,8 @@ import google.registry.model.host.HostResource;
|
||||
import google.registry.persistence.VKey;
|
||||
import google.registry.schema.replay.DatastoreOnlyEntity;
|
||||
import google.registry.util.NonFinalForTesting;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
@@ -76,13 +81,21 @@ public abstract class ForeignKeyIndex<E extends EppResource> extends BackupGroup
|
||||
public static class ForeignKeyHostIndex extends ForeignKeyIndex<HostResource>
|
||||
implements DatastoreOnlyEntity {}
|
||||
|
||||
static final ImmutableMap<Class<? extends EppResource>, Class<? extends ForeignKeyIndex<?>>>
|
||||
private static final ImmutableMap<
|
||||
Class<? extends EppResource>, Class<? extends ForeignKeyIndex<?>>>
|
||||
RESOURCE_CLASS_TO_FKI_CLASS =
|
||||
ImmutableMap.of(
|
||||
ContactResource.class, ForeignKeyContactIndex.class,
|
||||
DomainBase.class, ForeignKeyDomainIndex.class,
|
||||
HostResource.class, ForeignKeyHostIndex.class);
|
||||
|
||||
private static final ImmutableMap<Class<? extends EppResource>, String>
|
||||
RESOURCE_CLASS_TO_FKI_PROPERTY =
|
||||
ImmutableMap.of(
|
||||
ContactResource.class, "contactId",
|
||||
DomainBase.class, "fullyQualifiedDomainName",
|
||||
HostResource.class, "fullyQualifiedHostName");
|
||||
|
||||
@Id String foreignKey;
|
||||
|
||||
/**
|
||||
@@ -179,9 +192,42 @@ public abstract class ForeignKeyIndex<E extends EppResource> extends BackupGroup
|
||||
*/
|
||||
public static <E extends EppResource> ImmutableMap<String, ForeignKeyIndex<E>> load(
|
||||
Class<E> clazz, Iterable<String> foreignKeys, final DateTime now) {
|
||||
return ofy().load().type(mapToFkiClass(clazz)).ids(foreignKeys).entrySet().stream()
|
||||
.filter(e -> now.isBefore(e.getValue().deletionTime))
|
||||
.collect(ImmutableMap.toImmutableMap(Map.Entry::getKey, Map.Entry::getValue));
|
||||
if (tm().isOfy()) {
|
||||
return ofy().load().type(mapToFkiClass(clazz)).ids(foreignKeys).entrySet().stream()
|
||||
.filter(e -> now.isBefore(e.getValue().deletionTime))
|
||||
.collect(toImmutableMap(Map.Entry::getKey, Map.Entry::getValue));
|
||||
} else {
|
||||
String property = RESOURCE_CLASS_TO_FKI_PROPERTY.get(clazz);
|
||||
List<E> entities =
|
||||
tm().transact(
|
||||
() -> {
|
||||
String entityName =
|
||||
jpaTm().getEntityManager().getMetamodel().entity(clazz).getName();
|
||||
return jpaTm()
|
||||
.getEntityManager()
|
||||
.createQuery(
|
||||
String.format(
|
||||
"FROM %s WHERE %s IN :propertyValue and deletionTime > :now ",
|
||||
entityName, property),
|
||||
clazz)
|
||||
.setParameter("propertyValue", foreignKeys)
|
||||
.setParameter("now", now)
|
||||
.getResultList();
|
||||
});
|
||||
// We need to find and return the entities with the maximum deletionTime for each foreign key.
|
||||
return Multimaps.index(entities, EppResource::getForeignKey).asMap().entrySet().stream()
|
||||
.map(
|
||||
entry ->
|
||||
Maps.immutableEntry(
|
||||
entry.getKey(),
|
||||
entry.getValue().stream()
|
||||
.max(Comparator.comparing(EppResource::getDeletionTime))
|
||||
.get()))
|
||||
.collect(
|
||||
toImmutableMap(
|
||||
Map.Entry::getKey,
|
||||
entry -> create(entry.getValue(), entry.getValue().getDeletionTime())));
|
||||
}
|
||||
}
|
||||
|
||||
static final CacheLoader<Key<ForeignKeyIndex<?>>, Optional<ForeignKeyIndex<?>>> CACHE_LOADER =
|
||||
@@ -266,7 +312,7 @@ public abstract class ForeignKeyIndex<E extends EppResource> extends BackupGroup
|
||||
.filter(entry -> entry.getValue().isPresent())
|
||||
.filter(entry -> now.isBefore(entry.getValue().get().getDeletionTime()))
|
||||
.collect(
|
||||
ImmutableMap.toImmutableMap(
|
||||
toImmutableMap(
|
||||
entry -> entry.getKey().getName(),
|
||||
entry -> (ForeignKeyIndex<E>) entry.getValue().get()));
|
||||
return fkisFromCache;
|
||||
|
||||
@@ -16,6 +16,7 @@ package google.registry.model.ofy;
|
||||
|
||||
import static com.google.common.collect.ImmutableList.toImmutableList;
|
||||
import static com.google.common.collect.ImmutableMap.toImmutableMap;
|
||||
import static com.google.common.collect.ImmutableSet.toImmutableSet;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.util.PreconditionsUtils.checkArgumentNotNull;
|
||||
|
||||
@@ -23,10 +24,13 @@ import com.google.common.base.Functions;
|
||||
import com.google.common.collect.ImmutableCollection;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.common.collect.Streams;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Result;
|
||||
import google.registry.model.contact.ContactHistory;
|
||||
import google.registry.model.domain.DomainHistory;
|
||||
import google.registry.model.host.HostHistory;
|
||||
import google.registry.model.reporting.HistoryEntry;
|
||||
import google.registry.persistence.VKey;
|
||||
@@ -177,26 +181,13 @@ public class DatastoreTransactionManager implements TransactionManager {
|
||||
// VKey instead of by ofy Key. But ideally, there should be one set of TransactionManager
|
||||
// interface tests that are applied to both the datastore and SQL implementations.
|
||||
@Override
|
||||
public <T> Optional<T> maybeLoad(VKey<T> key) {
|
||||
public <T> Optional<T> loadByKeyIfPresent(VKey<T> key) {
|
||||
return Optional.ofNullable(loadNullable(key));
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> T load(VKey<T> key) {
|
||||
T result = loadNullable(key);
|
||||
if (result == null) {
|
||||
throw new NoSuchElementException(key.toString());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> T load(T entity) {
|
||||
return ofy().load().entity(entity).now();
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> ImmutableMap<VKey<? extends T>, T> load(Iterable<? extends VKey<? extends T>> keys) {
|
||||
public <T> ImmutableMap<VKey<? extends T>, T> loadByKeysIfPresent(
|
||||
Iterable<? extends VKey<? extends T>> keys) {
|
||||
// Keep track of the Key -> VKey mapping so we can translate them back.
|
||||
ImmutableMap<Key<T>, VKey<? extends T>> keyMap =
|
||||
StreamSupport.stream(keys.spliterator(), false)
|
||||
@@ -211,13 +202,51 @@ public class DatastoreTransactionManager implements TransactionManager {
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> ImmutableList<T> loadAll(Class<T> clazz) {
|
||||
return ImmutableList.copyOf(getOfy().load().type(clazz));
|
||||
public <T> ImmutableList<T> loadByEntitiesIfPresent(Iterable<T> entities) {
|
||||
return ImmutableList.copyOf(getOfy().load().entities(entities).values());
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> ImmutableList<T> loadAll(Iterable<T> entities) {
|
||||
return ImmutableList.copyOf(getOfy().load().entities(entities).values());
|
||||
public <T> T loadByKey(VKey<T> key) {
|
||||
T result = loadNullable(key);
|
||||
if (result == null) {
|
||||
throw new NoSuchElementException(key.toString());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> ImmutableMap<VKey<? extends T>, T> loadByKeys(
|
||||
Iterable<? extends VKey<? extends T>> keys) {
|
||||
ImmutableMap<VKey<? extends T>, T> result = loadByKeysIfPresent(keys);
|
||||
ImmutableSet<? extends VKey<? extends T>> missingKeys =
|
||||
Streams.stream(keys).filter(k -> !result.containsKey(k)).collect(toImmutableSet());
|
||||
if (!missingKeys.isEmpty()) {
|
||||
// Ofy ignores nonexistent keys but the method contract specifies to throw if nonexistent
|
||||
throw new NoSuchElementException(
|
||||
String.format("Failed to load nonexistent entities for keys: %s", missingKeys));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> T loadByEntity(T entity) {
|
||||
return ofy().load().entity(entity).now();
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> ImmutableList<T> loadByEntities(Iterable<T> entities) {
|
||||
ImmutableList<T> result = loadByEntitiesIfPresent(entities);
|
||||
if (result.size() != Iterables.size(entities)) {
|
||||
throw new NoSuchElementException(
|
||||
String.format("Attempted to load entities, some of which are missing: %s", entities));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> ImmutableList<T> loadAllOf(Class<T> clazz) {
|
||||
return ImmutableList.copyOf(getOfy().load().type(clazz));
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -264,6 +293,21 @@ public class DatastoreTransactionManager implements TransactionManager {
|
||||
getOfy().clearSessionCache();
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes the given {@link Result} instance synchronously if not in a transaction.
|
||||
*
|
||||
* <p>The {@link Result} instance contains a task that will be executed by Objectify
|
||||
* asynchronously. If it is in a transaction, we don't need to execute the task immediately
|
||||
* because it is guaranteed to be done by the end of the transaction. However, if it is not in a
|
||||
* transaction, we need to execute it in case the following code expects that happens before
|
||||
* themselves.
|
||||
*/
|
||||
private void syncIfTransactionless(Result<?> result) {
|
||||
if (!inTransaction()) {
|
||||
result.now();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The following three methods exist due to the migration to Cloud SQL.
|
||||
*
|
||||
@@ -281,34 +325,23 @@ public class DatastoreTransactionManager implements TransactionManager {
|
||||
syncIfTransactionless(getOfy().save().entity(entity));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private <T> T toChildHistoryEntryIfPossible(@Nullable T obj) {
|
||||
// NB: The Key of the object in question may not necessarily be the resulting class that we
|
||||
// wish to have. Because all *History classes are @EntitySubclasses, their Keys will have type
|
||||
// HistoryEntry -- even if you create them based off the *History class.
|
||||
if (obj != null && HistoryEntry.class.isAssignableFrom(obj.getClass())) {
|
||||
return (T) ((HistoryEntry) obj).toChildHistoryEntity();
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
private <T> T loadNullable(VKey<T> key) {
|
||||
return toChildHistoryEntryIfPossible(getOfy().load().key(key.getOfyKey()).now());
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes the given {@link Result} instance synchronously if not in a transaction.
|
||||
*
|
||||
* <p>The {@link Result} instance contains a task that will be executed by Objectify
|
||||
* asynchronously. If it is in a transaction, we don't need to execute the task immediately
|
||||
* because it is guaranteed to be done by the end of the transaction. However, if it is not in a
|
||||
* transaction, we need to execute it in case the following code expects that happens before
|
||||
* themselves.
|
||||
*/
|
||||
private void syncIfTransactionless(Result<?> result) {
|
||||
if (!inTransaction()) {
|
||||
result.now();
|
||||
/** Converts a nonnull {@link HistoryEntry} to the child format, e.g. {@link DomainHistory} */
|
||||
@SuppressWarnings("unchecked")
|
||||
public static <T> T toChildHistoryEntryIfPossible(@Nullable T obj) {
|
||||
// NB: The Key of the object in question may not necessarily be the resulting class that we
|
||||
// wish to have. Because all *History classes are @EntitySubclasses, their Keys will have type
|
||||
// HistoryEntry -- even if you create them based off the *History class.
|
||||
if (obj instanceof HistoryEntry
|
||||
&& !(obj instanceof ContactHistory)
|
||||
&& !(obj instanceof DomainHistory)
|
||||
&& !(obj instanceof HostHistory)) {
|
||||
return (T) ((HistoryEntry) obj).toChildHistoryEntity();
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,8 +42,8 @@ import google.registry.model.translators.CidrAddressBlockTranslatorFactory;
|
||||
import google.registry.model.translators.CommitLogRevisionsTranslatorFactory;
|
||||
import google.registry.model.translators.CreateAutoTimestampTranslatorFactory;
|
||||
import google.registry.model.translators.CurrencyUnitTranslatorFactory;
|
||||
import google.registry.model.translators.DomainHistoryVKeyTranslatorFactory;
|
||||
import google.registry.model.translators.DurationTranslatorFactory;
|
||||
import google.registry.model.translators.EppHistoryVKeyTranslatorFactory;
|
||||
import google.registry.model.translators.InetAddressTranslatorFactory;
|
||||
import google.registry.model.translators.ReadableInstantUtcTranslatorFactory;
|
||||
import google.registry.model.translators.UpdateAutoTimestampTranslatorFactory;
|
||||
@@ -128,7 +128,7 @@ public class ObjectifyService {
|
||||
new CreateAutoTimestampTranslatorFactory(),
|
||||
new CurrencyUnitTranslatorFactory(),
|
||||
new DurationTranslatorFactory(),
|
||||
new DomainHistoryVKeyTranslatorFactory(),
|
||||
new EppHistoryVKeyTranslatorFactory(),
|
||||
new InetAddressTranslatorFactory(),
|
||||
new MoneyStringTranslatorFactory(),
|
||||
new ReadableInstantUtcTranslatorFactory(),
|
||||
|
||||
@@ -14,7 +14,18 @@
|
||||
|
||||
package google.registry.model.ofy;
|
||||
|
||||
import static google.registry.model.ofy.EntityWritePriorities.getEntityPriority;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
|
||||
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.googlecode.objectify.Key;
|
||||
import google.registry.config.RegistryEnvironment;
|
||||
import google.registry.model.UpdateAutoTimestamp;
|
||||
import google.registry.persistence.VKey;
|
||||
import google.registry.schema.replay.DatastoreEntity;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentLinkedQueue;
|
||||
|
||||
/**
|
||||
@@ -24,23 +35,79 @@ import java.util.concurrent.ConcurrentLinkedQueue;
|
||||
*/
|
||||
public class ReplayQueue {
|
||||
|
||||
static ConcurrentLinkedQueue<TransactionInfo> queue =
|
||||
new ConcurrentLinkedQueue<TransactionInfo>();
|
||||
static ConcurrentLinkedQueue<ImmutableMap<Key<?>, Object>> queue =
|
||||
new ConcurrentLinkedQueue<ImmutableMap<Key<?>, Object>>();
|
||||
|
||||
static void addInTests(TransactionInfo info) {
|
||||
if (RegistryEnvironment.get() == RegistryEnvironment.UNITTEST) {
|
||||
queue.add(info);
|
||||
// Transform the entities to be persisted to the set of values as they were actually
|
||||
// persisted.
|
||||
ImmutableMap.Builder<Key<?>, Object> builder = new ImmutableMap.Builder<Key<?>, Object>();
|
||||
for (ImmutableMap.Entry<Key<?>, Object> entry : info.getChanges().entrySet()) {
|
||||
if (entry.getValue().equals(TransactionInfo.Delete.SENTINEL)) {
|
||||
builder.put(entry.getKey(), entry.getValue());
|
||||
} else {
|
||||
// The value is an entity object that has not yet been persisted, and thus some of the
|
||||
// special transformations that we do (notably the auto-timestamp transformations) have
|
||||
// not been applied. Converting the object to an entity and then back again performs
|
||||
// those transformations so that we persist the same values to SQL that we have in
|
||||
// Datastore.
|
||||
builder.put(entry.getKey(), ofy().toPojo(ofy().toEntity(entry.getValue())));
|
||||
}
|
||||
}
|
||||
queue.add(builder.build());
|
||||
}
|
||||
}
|
||||
|
||||
public static void replay() {
|
||||
TransactionInfo info;
|
||||
while ((info = queue.poll()) != null) {
|
||||
info.saveToJpa();
|
||||
/** Replay all transactions, return the set of keys that were replayed. */
|
||||
public static ImmutableMap<Key<?>, Object> replay() {
|
||||
// We can't use an ImmutableMap.Builder here, we need to be able to overwrite existing values
|
||||
// and the builder doesn't support that.
|
||||
Map<Key<?>, Object> result = new HashMap<Key<?>, Object>();
|
||||
ImmutableMap<Key<?>, Object> changes;
|
||||
while ((changes = queue.poll()) != null) {
|
||||
saveToJpa(changes);
|
||||
result.putAll(changes);
|
||||
}
|
||||
|
||||
return ImmutableMap.copyOf(result);
|
||||
}
|
||||
|
||||
public static void clear() {
|
||||
queue.clear();
|
||||
}
|
||||
|
||||
/** Returns the priority of the entity type in the map entry. */
|
||||
private static int getPriority(ImmutableMap.Entry<Key<?>, Object> entry) {
|
||||
return getEntityPriority(
|
||||
entry.getKey().getKind(), entry.getValue().equals(TransactionInfo.Delete.SENTINEL));
|
||||
}
|
||||
|
||||
private static int compareByPriority(
|
||||
ImmutableMap.Entry<Key<?>, Object> a, ImmutableMap.Entry<Key<?>, Object> b) {
|
||||
return getPriority(a) - getPriority(b);
|
||||
}
|
||||
|
||||
private static void saveToJpa(ImmutableMap<Key<?>, Object> changes) {
|
||||
try (UpdateAutoTimestamp.DisableAutoUpdateResource disabler =
|
||||
UpdateAutoTimestamp.disableAutoUpdate()) {
|
||||
// Sort the changes into an order that will work for insertion into the database.
|
||||
jpaTm()
|
||||
.transact(
|
||||
() -> {
|
||||
changes.entrySet().stream()
|
||||
.sorted(ReplayQueue::compareByPriority)
|
||||
.forEach(
|
||||
entry -> {
|
||||
if (entry.getValue().equals(TransactionInfo.Delete.SENTINEL)) {
|
||||
jpaTm().delete(VKey.from(entry.getKey()));
|
||||
} else {
|
||||
((DatastoreEntity) entry.getValue())
|
||||
.toSqlEntity()
|
||||
.ifPresent(jpaTm()::put);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,24 +20,20 @@ import static com.google.common.collect.ImmutableSet.toImmutableSet;
|
||||
import static com.google.common.collect.Maps.filterValues;
|
||||
import static com.google.common.collect.Maps.toMap;
|
||||
import static google.registry.model.ofy.CommitLogBucket.getArbitraryBucketId;
|
||||
import static google.registry.model.ofy.EntityWritePriorities.getEntityPriority;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.googlecode.objectify.Key;
|
||||
import google.registry.persistence.VKey;
|
||||
import google.registry.schema.replay.DatastoreEntity;
|
||||
import java.util.Map;
|
||||
import org.joda.time.DateTime;
|
||||
|
||||
/** Metadata for an {@link Ofy} transaction that saves commit logs. */
|
||||
class TransactionInfo {
|
||||
public class TransactionInfo {
|
||||
|
||||
@VisibleForTesting
|
||||
enum Delete {
|
||||
public enum Delete {
|
||||
SENTINEL
|
||||
}
|
||||
|
||||
@@ -87,6 +83,10 @@ class TransactionInfo {
|
||||
return ImmutableSet.copyOf(changesBuilder.build().keySet());
|
||||
}
|
||||
|
||||
ImmutableMap<Key<?>, Object> getChanges() {
|
||||
return changesBuilder.build();
|
||||
}
|
||||
|
||||
ImmutableSet<Key<?>> getDeletes() {
|
||||
return ImmutableSet.copyOf(
|
||||
filterValues(changesBuilder.build(), Delete.SENTINEL::equals).keySet());
|
||||
@@ -100,35 +100,4 @@ class TransactionInfo {
|
||||
.filter(not(Delete.SENTINEL::equals))
|
||||
.collect(toImmutableSet());
|
||||
}
|
||||
|
||||
/** Returns the weight of the entity type in the map entry. */
|
||||
@VisibleForTesting
|
||||
static int getWeight(ImmutableMap.Entry<Key<?>, Object> entry) {
|
||||
return getEntityPriority(entry.getKey().getKind(), entry.getValue().equals(Delete.SENTINEL));
|
||||
}
|
||||
|
||||
private static int compareByWeight(
|
||||
ImmutableMap.Entry<Key<?>, Object> a, ImmutableMap.Entry<Key<?>, Object> b) {
|
||||
return getWeight(a) - getWeight(b);
|
||||
}
|
||||
|
||||
void saveToJpa() {
|
||||
// Sort the changes into an order that will work for insertion into the database.
|
||||
jpaTm()
|
||||
.transact(
|
||||
() -> {
|
||||
changesBuilder.build().entrySet().stream()
|
||||
.sorted(TransactionInfo::compareByWeight)
|
||||
.forEach(
|
||||
entry -> {
|
||||
if (entry.getValue().equals(Delete.SENTINEL)) {
|
||||
jpaTm().delete(VKey.from(entry.getKey()));
|
||||
} else {
|
||||
((DatastoreEntity) entry.getValue())
|
||||
.toSqlEntity()
|
||||
.ifPresent(jpaTm()::put);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ package google.registry.model.poll;
|
||||
|
||||
import static com.google.common.collect.ImmutableList.toImmutableList;
|
||||
import static google.registry.util.CollectionUtils.forceEmptyToNull;
|
||||
import static google.registry.util.CollectionUtils.isNullOrEmpty;
|
||||
import static google.registry.util.CollectionUtils.nullToEmpty;
|
||||
import static google.registry.util.DateTimeUtils.END_OF_TIME;
|
||||
import static google.registry.util.PreconditionsUtils.checkArgumentNotNull;
|
||||
@@ -52,6 +53,7 @@ import google.registry.persistence.WithLongVKey;
|
||||
import google.registry.schema.replay.DatastoreAndSqlEntity;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import javax.annotation.Nullable;
|
||||
import javax.persistence.AttributeOverride;
|
||||
import javax.persistence.AttributeOverrides;
|
||||
import javax.persistence.Column;
|
||||
@@ -185,6 +187,7 @@ public abstract class PollMessage extends ImmutableObject
|
||||
@Override
|
||||
public abstract VKey<? extends PollMessage> createVKey();
|
||||
|
||||
/** Static VKey factory method for use by VKeyTranslatorFactory. */
|
||||
public static VKey<PollMessage> createVKey(Key<PollMessage> key) {
|
||||
return VKey.create(PollMessage.class, key.getId(), key);
|
||||
}
|
||||
@@ -289,10 +292,10 @@ public abstract class PollMessage extends ImmutableObject
|
||||
|
||||
@Transient List<ContactTransferResponse> contactTransferResponses;
|
||||
|
||||
@Transient
|
||||
@Transient @ImmutableObject.DoNotCompare
|
||||
List<DomainPendingActionNotificationResponse> domainPendingActionNotificationResponses;
|
||||
|
||||
@Transient List<DomainTransferResponse> domainTransferResponses;
|
||||
@Transient @ImmutableObject.DoNotCompare List<DomainTransferResponse> domainTransferResponses;
|
||||
|
||||
@Transient List<HostPendingActionNotificationResponse> hostPendingActionNotificationResponses;
|
||||
|
||||
@@ -355,6 +358,11 @@ public abstract class PollMessage extends ImmutableObject
|
||||
return VKey.create(OneTime.class, getId(), Key.create(this));
|
||||
}
|
||||
|
||||
/** Converts an unspecialized VKey<PollMessage> to a VKey of the derived class. */
|
||||
public static @Nullable VKey<OneTime> convertVKey(@Nullable VKey<OneTime> key) {
|
||||
return key == null ? null : VKey.create(OneTime.class, key.getSqlKey(), key.getOfyKey());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder asBuilder() {
|
||||
return new Builder(clone(this));
|
||||
@@ -371,6 +379,47 @@ public abstract class PollMessage extends ImmutableObject
|
||||
.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
@OnLoad
|
||||
void onLoad() {
|
||||
super.onLoad();
|
||||
if (!isNullOrEmpty(contactPendingActionNotificationResponses)) {
|
||||
pendingActionNotificationResponse = contactPendingActionNotificationResponses.get(0);
|
||||
}
|
||||
if (!isNullOrEmpty(contactTransferResponses)) {
|
||||
contactId = contactTransferResponses.get(0).getContactId();
|
||||
transferResponse = contactTransferResponses.get(0);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@PostLoad
|
||||
void postLoad() {
|
||||
super.postLoad();
|
||||
if (pendingActionNotificationResponse != null) {
|
||||
contactPendingActionNotificationResponses =
|
||||
ImmutableList.of(
|
||||
ContactPendingActionNotificationResponse.create(
|
||||
pendingActionNotificationResponse.nameOrId.value,
|
||||
pendingActionNotificationResponse.getActionResult(),
|
||||
pendingActionNotificationResponse.getTrid(),
|
||||
pendingActionNotificationResponse.processedDate));
|
||||
}
|
||||
if (contactId != null && transferResponse != null) {
|
||||
contactTransferResponses =
|
||||
ImmutableList.of(
|
||||
new ContactTransferResponse.Builder()
|
||||
.setContactId(contactId)
|
||||
.setGainingClientId(transferResponse.getGainingClientId())
|
||||
.setLosingClientId(transferResponse.getLosingClientId())
|
||||
.setTransferStatus(transferResponse.getTransferStatus())
|
||||
.setTransferRequestTime(transferResponse.getTransferRequestTime())
|
||||
.setPendingTransferExpirationTime(
|
||||
transferResponse.getPendingTransferExpirationTime())
|
||||
.build());
|
||||
}
|
||||
}
|
||||
|
||||
/** A builder for {@link OneTime} since it is immutable. */
|
||||
public static class Builder extends PollMessage.Builder<OneTime, Builder> {
|
||||
|
||||
@@ -389,6 +438,10 @@ public abstract class PollMessage extends ImmutableObject
|
||||
.filter(ContactPendingActionNotificationResponse.class::isInstance)
|
||||
.map(ContactPendingActionNotificationResponse.class::cast)
|
||||
.collect(toImmutableList()));
|
||||
if (getInstance().contactPendingActionNotificationResponses != null) {
|
||||
getInstance().pendingActionNotificationResponse =
|
||||
getInstance().contactPendingActionNotificationResponses.get(0);
|
||||
}
|
||||
getInstance().contactTransferResponses =
|
||||
forceEmptyToNull(
|
||||
responseData
|
||||
@@ -396,6 +449,11 @@ public abstract class PollMessage extends ImmutableObject
|
||||
.filter(ContactTransferResponse.class::isInstance)
|
||||
.map(ContactTransferResponse.class::cast)
|
||||
.collect(toImmutableList()));
|
||||
if (getInstance().contactTransferResponses != null) {
|
||||
getInstance().contactId = getInstance().contactTransferResponses.get(0).getContactId();
|
||||
getInstance().transferResponse = getInstance().contactTransferResponses.get(0);
|
||||
}
|
||||
|
||||
getInstance().domainPendingActionNotificationResponses =
|
||||
forceEmptyToNull(
|
||||
responseData
|
||||
@@ -456,6 +514,11 @@ public abstract class PollMessage extends ImmutableObject
|
||||
return VKey.create(Autorenew.class, getId(), Key.create(this));
|
||||
}
|
||||
|
||||
/** Converts an unspecialized VKey<PollMessage> to a VKey of the derived class. */
|
||||
public static @Nullable VKey<Autorenew> convertVKey(VKey<Autorenew> key) {
|
||||
return key == null ? null : VKey.create(Autorenew.class, key.getSqlKey(), key.getOfyKey());
|
||||
}
|
||||
|
||||
@Override
|
||||
public ImmutableList<ResponseData> getResponseData() {
|
||||
// Note that the event time is when the auto-renew occured, so the expiration time in the
|
||||
|
||||
@@ -97,7 +97,7 @@ public final class RdeRevision extends BackupGroupRoot implements NonReplicatedE
|
||||
RdeRevisionId sqlKey = RdeRevisionId.create(tld, date.toLocalDate(), mode);
|
||||
Key<RdeRevision> ofyKey = Key.create(RdeRevision.class, id);
|
||||
Optional<RdeRevision> revisionOptional =
|
||||
tm().maybeLoad(VKey.create(RdeRevision.class, sqlKey, ofyKey));
|
||||
tm().loadByKeyIfPresent(VKey.create(RdeRevision.class, sqlKey, ofyKey));
|
||||
return revisionOptional.map(rdeRevision -> rdeRevision.revision + 1).orElse(0);
|
||||
}
|
||||
|
||||
@@ -117,7 +117,7 @@ public final class RdeRevision extends BackupGroupRoot implements NonReplicatedE
|
||||
RdeRevisionId sqlKey = RdeRevisionId.create(tld, date.toLocalDate(), mode);
|
||||
Key<RdeRevision> ofyKey = Key.create(RdeRevision.class, triplet);
|
||||
Optional<RdeRevision> revisionOptional =
|
||||
tm().maybeLoad(VKey.create(RdeRevision.class, sqlKey, ofyKey));
|
||||
tm().loadByKeyIfPresent(VKey.create(RdeRevision.class, sqlKey, ofyKey));
|
||||
if (revision == 0) {
|
||||
revisionOptional.ifPresent(
|
||||
rdeRevision -> {
|
||||
|
||||
@@ -234,7 +234,7 @@ public class Registrar extends ImmutableObject
|
||||
* Unique registrar client id. Must conform to "clIDType" as defined in RFC5730.
|
||||
*
|
||||
* @see <a href="http://tools.ietf.org/html/rfc5730#section-4.2">Shared Structure Schema</a>
|
||||
* <p>TODO(shicong): Rename this field to clientId
|
||||
* <p>TODO(b/177568946): Rename this field to registrarId.
|
||||
*/
|
||||
@Id
|
||||
@javax.persistence.Id
|
||||
@@ -537,20 +537,24 @@ public class Registrar extends ImmutableObject
|
||||
return LIVE_STATES.contains(state) && PUBLICLY_VISIBLE_TYPES.contains(type);
|
||||
}
|
||||
|
||||
public String getClientCertificate() {
|
||||
return clientCertificate;
|
||||
/** Returns the client certificate string if it has been set, or empty otherwise. */
|
||||
public Optional<String> getClientCertificate() {
|
||||
return Optional.ofNullable(clientCertificate);
|
||||
}
|
||||
|
||||
public String getClientCertificateHash() {
|
||||
return clientCertificateHash;
|
||||
/** Returns the client certificate hash if it has been set, or empty otherwise. */
|
||||
public Optional<String> getClientCertificateHash() {
|
||||
return Optional.ofNullable(clientCertificateHash);
|
||||
}
|
||||
|
||||
public String getFailoverClientCertificate() {
|
||||
return failoverClientCertificate;
|
||||
/** Returns the failover client certificate string if it has been set, or empty otherwise. */
|
||||
public Optional<String> getFailoverClientCertificate() {
|
||||
return Optional.ofNullable(failoverClientCertificate);
|
||||
}
|
||||
|
||||
public String getFailoverClientCertificateHash() {
|
||||
return failoverClientCertificateHash;
|
||||
/** Returns the failover client certificate hash if it has been set, or empty otherwise. */
|
||||
public Optional<String> getFailoverClientCertificateHash() {
|
||||
return Optional.ofNullable(failoverClientCertificateHash);
|
||||
}
|
||||
|
||||
public ImmutableList<CidrAddressBlock> getIpAddressAllowList() {
|
||||
@@ -815,7 +819,8 @@ public class Registrar extends ImmutableObject
|
||||
.map(Registry::createVKey)
|
||||
.collect(toImmutableSet());
|
||||
Set<VKey<Registry>> missingTldKeys =
|
||||
Sets.difference(newTldKeys, transactIfJpaTm(() -> tm().load(newTldKeys)).keySet());
|
||||
Sets.difference(
|
||||
newTldKeys, transactIfJpaTm(() -> tm().loadByKeysIfPresent(newTldKeys)).keySet());
|
||||
checkArgument(missingTldKeys.isEmpty(), "Trying to set nonexisting TLDs: %s", missingTldKeys);
|
||||
getInstance().allowedTlds = ImmutableSortedSet.copyOf(allowedTlds);
|
||||
return this;
|
||||
@@ -983,7 +988,7 @@ public class Registrar extends ImmutableObject
|
||||
public static Iterable<Registrar> loadAll() {
|
||||
return tm().isOfy()
|
||||
? ImmutableList.copyOf(ofy().load().type(Registrar.class).ancestor(getCrossTldKey()))
|
||||
: tm().transact(() -> tm().loadAll(Registrar.class));
|
||||
: tm().transact(() -> tm().loadAllOf(Registrar.class));
|
||||
}
|
||||
|
||||
/** Loads all registrar entities using an in-memory cache. */
|
||||
@@ -994,7 +999,7 @@ public class Registrar extends ImmutableObject
|
||||
/** Loads and returns a registrar entity by its client id directly from Datastore. */
|
||||
public static Optional<Registrar> loadByClientId(String clientId) {
|
||||
checkArgument(!Strings.isNullOrEmpty(clientId), "clientId must be specified");
|
||||
return transactIfJpaTm(() -> tm().maybeLoad(createVKey(clientId)));
|
||||
return transactIfJpaTm(() -> tm().loadByKeyIfPresent(createVKey(clientId)));
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -69,7 +69,7 @@ public final class Registries {
|
||||
.stream()
|
||||
.map(Key::getName)
|
||||
.collect(toImmutableSet())
|
||||
: tm().loadAll(Registry.class).stream()
|
||||
: tm().loadAllOf(Registry.class).stream()
|
||||
.map(Registry::getTldStr)
|
||||
.collect(toImmutableSet());
|
||||
return Registry.getAll(tlds).stream()
|
||||
|
||||
@@ -267,7 +267,7 @@ public class Registry extends ImmutableObject implements Buildable, DatastoreAnd
|
||||
public Optional<Registry> load(final String tld) {
|
||||
// Enter a transaction-less context briefly; we don't want to enroll every TLD in
|
||||
// a transaction that might be wrapping this call.
|
||||
return tm().doTransactionless(() -> tm().maybeLoad(createVKey(tld)));
|
||||
return tm().doTransactionless(() -> tm().loadByKeyIfPresent(createVKey(tld)));
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -275,7 +275,7 @@ public class Registry extends ImmutableObject implements Buildable, DatastoreAnd
|
||||
ImmutableMap<String, VKey<Registry>> keysMap =
|
||||
toMap(ImmutableSet.copyOf(tlds), Registry::createVKey);
|
||||
Map<VKey<? extends Registry>, Registry> entities =
|
||||
tm().doTransactionless(() -> tm().load(keysMap.values()));
|
||||
tm().doTransactionless(() -> tm().loadByKeys(keysMap.values()));
|
||||
return Maps.transformEntries(
|
||||
keysMap, (k, v) -> Optional.ofNullable(entities.getOrDefault(v, null)));
|
||||
}
|
||||
|
||||
@@ -62,7 +62,7 @@ public class ReservedListDualWriteDao {
|
||||
public static Optional<ReservedList> getLatestRevision(String reservedListName) {
|
||||
Optional<ReservedList> maybeDatastoreList =
|
||||
ofyTm()
|
||||
.maybeLoad(
|
||||
.loadByKeyIfPresent(
|
||||
VKey.createOfy(
|
||||
ReservedList.class,
|
||||
Key.create(getCrossTldKey(), ReservedList.class, reservedListName)));
|
||||
|
||||
@@ -49,7 +49,9 @@ public class DomainTransactionRecord extends ImmutableObject
|
||||
|
||||
@Id
|
||||
@Ignore
|
||||
@ImmutableObject.DoNotCompare
|
||||
@GeneratedValue(strategy = GenerationType.IDENTITY)
|
||||
@ImmutableObject.Insignificant
|
||||
Long id;
|
||||
|
||||
/** The TLD this record operates on. */
|
||||
|
||||
@@ -198,6 +198,7 @@ public class HistoryEntry extends ImmutableObject implements Buildable, Datastor
|
||||
* transaction counts (such as contact or host mutations).
|
||||
*/
|
||||
@Transient // domain-specific
|
||||
@ImmutableObject.EmptySetToNull
|
||||
protected Set<DomainTransactionRecord> domainTransactionRecords;
|
||||
|
||||
public long getId() {
|
||||
|
||||
@@ -0,0 +1,144 @@
|
||||
// Copyright 2020 The Nomulus Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package google.registry.model.reporting;
|
||||
|
||||
import static com.google.common.collect.ImmutableList.toImmutableList;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
|
||||
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
|
||||
import static google.registry.util.DateTimeUtils.END_OF_TIME;
|
||||
import static google.registry.util.DateTimeUtils.START_OF_TIME;
|
||||
|
||||
import com.google.common.collect.Iterables;
|
||||
import google.registry.model.EppResource;
|
||||
import google.registry.model.contact.ContactHistory;
|
||||
import google.registry.model.contact.ContactResource;
|
||||
import google.registry.model.domain.DomainBase;
|
||||
import google.registry.model.domain.DomainHistory;
|
||||
import google.registry.model.host.HostHistory;
|
||||
import google.registry.model.host.HostResource;
|
||||
import google.registry.persistence.VKey;
|
||||
import java.util.Comparator;
|
||||
import javax.persistence.EntityManager;
|
||||
import org.joda.time.DateTime;
|
||||
|
||||
/**
|
||||
* Retrieves {@link HistoryEntry} descendants (e.g. {@link DomainHistory}).
|
||||
*
|
||||
* <p>This class is configured to retrieve either from Datastore or SQL, depending on which database
|
||||
* is currently considered the primary database.
|
||||
*/
|
||||
public class HistoryEntryDao {
|
||||
|
||||
/** Loads all history objects in the times specified, including all types. */
|
||||
public static Iterable<? extends HistoryEntry> loadAllHistoryObjects(
|
||||
DateTime afterTime, DateTime beforeTime) {
|
||||
if (tm().isOfy()) {
|
||||
return ofy()
|
||||
.load()
|
||||
.type(HistoryEntry.class)
|
||||
.order("modificationTime")
|
||||
.filter("modificationTime >=", afterTime)
|
||||
.filter("modificationTime <=", beforeTime);
|
||||
} else {
|
||||
return jpaTm()
|
||||
.transact(
|
||||
() ->
|
||||
Iterables.concat(
|
||||
loadAllHistoryObjectsFromSql(ContactHistory.class, afterTime, beforeTime),
|
||||
loadAllHistoryObjectsFromSql(DomainHistory.class, afterTime, beforeTime),
|
||||
loadAllHistoryObjectsFromSql(HostHistory.class, afterTime, beforeTime)));
|
||||
}
|
||||
}
|
||||
|
||||
/** Loads all history objects corresponding to the given {@link EppResource}. */
|
||||
public static Iterable<? extends HistoryEntry> loadHistoryObjectsForResource(
|
||||
VKey<? extends EppResource> parentKey) {
|
||||
return loadHistoryObjectsForResource(parentKey, START_OF_TIME, END_OF_TIME);
|
||||
}
|
||||
|
||||
/** Loads all history objects in the time period specified for the given {@link EppResource}. */
|
||||
public static Iterable<? extends HistoryEntry> loadHistoryObjectsForResource(
|
||||
VKey<? extends EppResource> parentKey, DateTime afterTime, DateTime beforeTime) {
|
||||
if (tm().isOfy()) {
|
||||
return ofy()
|
||||
.load()
|
||||
.type(HistoryEntry.class)
|
||||
.ancestor(parentKey.getOfyKey())
|
||||
.order("modificationTime")
|
||||
.filter("modificationTime >=", afterTime)
|
||||
.filter("modificationTime <=", beforeTime);
|
||||
} else {
|
||||
return jpaTm()
|
||||
.transact(() -> loadHistoryObjectsForResourceFromSql(parentKey, afterTime, beforeTime));
|
||||
}
|
||||
}
|
||||
|
||||
private static Iterable<? extends HistoryEntry> loadHistoryObjectsForResourceFromSql(
|
||||
VKey<? extends EppResource> parentKey, DateTime afterTime, DateTime beforeTime) {
|
||||
Class<? extends HistoryEntry> historyClass = getHistoryClassFromParent(parentKey.getKind());
|
||||
String repoIdFieldName = getRepoIdFieldNameFromHistoryClass(historyClass);
|
||||
EntityManager entityManager = jpaTm().getEntityManager();
|
||||
String tableName = entityManager.getMetamodel().entity(historyClass).getName();
|
||||
String queryString =
|
||||
String.format(
|
||||
"SELECT entry FROM %s entry WHERE entry.modificationTime >= :afterTime AND "
|
||||
+ "entry.modificationTime <= :beforeTime AND entry.%s = :parentKey",
|
||||
tableName, repoIdFieldName);
|
||||
return entityManager
|
||||
.createQuery(queryString, historyClass)
|
||||
.setParameter("afterTime", afterTime)
|
||||
.setParameter("beforeTime", beforeTime)
|
||||
.setParameter("parentKey", parentKey.getSqlKey().toString())
|
||||
.getResultStream()
|
||||
.sorted(Comparator.comparing(HistoryEntry::getModificationTime))
|
||||
.collect(toImmutableList());
|
||||
}
|
||||
|
||||
private static Class<? extends HistoryEntry> getHistoryClassFromParent(
|
||||
Class<? extends EppResource> parent) {
|
||||
if (parent.equals(ContactResource.class)) {
|
||||
return ContactHistory.class;
|
||||
} else if (parent.equals(DomainBase.class)) {
|
||||
return DomainHistory.class;
|
||||
} else if (parent.equals(HostResource.class)) {
|
||||
return HostHistory.class;
|
||||
}
|
||||
throw new IllegalArgumentException(
|
||||
String.format("Unknown history type for parent %s", parent.getName()));
|
||||
}
|
||||
|
||||
private static String getRepoIdFieldNameFromHistoryClass(
|
||||
Class<? extends HistoryEntry> historyClass) {
|
||||
return historyClass.equals(ContactHistory.class)
|
||||
? "contactRepoId"
|
||||
: historyClass.equals(DomainHistory.class) ? "domainRepoId" : "hostRepoId";
|
||||
}
|
||||
|
||||
private static Iterable<? extends HistoryEntry> loadAllHistoryObjectsFromSql(
|
||||
Class<? extends HistoryEntry> historyClass, DateTime afterTime, DateTime beforeTime) {
|
||||
EntityManager entityManager = jpaTm().getEntityManager();
|
||||
return entityManager
|
||||
.createQuery(
|
||||
String.format(
|
||||
"SELECT entry FROM %s entry WHERE entry.modificationTime >= :afterTime AND "
|
||||
+ "entry.modificationTime <= :beforeTime",
|
||||
entityManager.getMetamodel().entity(historyClass).getName()),
|
||||
historyClass)
|
||||
.setParameter("afterTime", afterTime)
|
||||
.setParameter("beforeTime", beforeTime)
|
||||
.getResultList();
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user