1
0
mirror of https://github.com/google/nomulus synced 2026-03-06 10:44:53 +00:00

Compare commits

..

7 Commits

Author SHA1 Message Date
gbrodman
72016b1e5f Update more of the documentation (#2974)
We should be at least at a "good enough" state after this -- I'm sure
there are many updates we could make that would improve the
documentation but this is definitely much improved from before and
should hopefully be good enough to get people started.
2026-03-03 20:25:30 +00:00
gbrodman
25fcef8a5b Fix typo in a command (#2973) 2026-03-02 18:15:44 +00:00
Pavlo Tkach
186dd80567 Enable password reset for registrars (#2971) 2026-02-27 20:02:51 +00:00
gbrodman
c52983fb61 Update some Nomulus documentation (#2970)
This doesn't update everything -- it leaves out some of the more
complicated changes (architecture, code-structure, configuration,
install, and proxy-setup). Those will require more complete rewrites, so
I'm punting them to a future PR.
2026-02-26 19:05:22 +00:00
Weimin Yu
8a3ab00e58 Apply Fee tag normalization in production (#2968)
Feature verified in Sandbox.
2026-02-25 20:02:37 +00:00
Pavlo Tkach
49df9c325a Update angular @21 (#2965) 2026-02-24 20:08:27 +00:00
gbrodman
929dccbfe3 Remove the concept of a TransferData abstract class (#2966)
The only type of thing that can be transferred now is a domain, so
there's no point in having this abstract class / redirection.

This does not include deletion of the contact-response-related XML
classes; that can come next.
2026-02-23 16:08:27 +00:00
102 changed files with 5569 additions and 9512 deletions

View File

@@ -59,8 +59,6 @@ Nomulus has the following capabilities:
implementation that works with BIND. If you are using Google Cloud DNS, you
may need to understand its capabilities and provide your own
multi-[AS](https://en.wikipedia.org/wiki/Autonomous_system_\(Internet\)) solution.
* **[WHOIS](https://en.wikipedia.org/wiki/WHOIS)**: A text-based protocol that
returns ownership and contact information on registered domain names.
* **[Registration Data Access Protocol
(RDAP)](https://en.wikipedia.org/wiki/Registration_Data_Access_Protocol)**:
A JSON API that returns structured, machine-readable information about

View File

@@ -15,7 +15,7 @@
"prefix": "app",
"architect": {
"build": {
"builder": "@angular-devkit/build-angular:application",
"builder": "@angular/build:application",
"options": {
"outputPath": {
"base": "staged/dist/",
@@ -112,7 +112,7 @@
"defaultConfiguration": "production"
},
"serve": {
"builder": "@angular-devkit/build-angular:dev-server",
"builder": "@angular/build:dev-server",
"configurations": {
"production": {
"buildTarget": "console-webapp:build:production"
@@ -136,16 +136,18 @@
"defaultConfiguration": "development"
},
"extract-i18n": {
"builder": "@angular-devkit/build-angular:extract-i18n",
"builder": "@angular/build:extract-i18n",
"options": {
"buildTarget": "console-webapp:build"
}
},
"test": {
"builder": "@angular-devkit/build-angular:karma",
"builder": "@angular/build:karma",
"options": {
"main": "src/test.ts",
"polyfills": "src/polyfills.ts",
"polyfills": [
"src/polyfills.ts"
],
"tsConfig": "tsconfig.spec.json",
"karmaConfig": "karma.conf.js",
"inlineStyleLanguage": "scss",
@@ -183,5 +185,31 @@
"schematicCollections": [
"@angular-eslint/schematics"
]
},
"schematics": {
"@schematics/angular:component": {
"type": "component"
},
"@schematics/angular:directive": {
"type": "directive"
},
"@schematics/angular:service": {
"type": "service"
},
"@schematics/angular:guard": {
"typeSeparator": "."
},
"@schematics/angular:interceptor": {
"typeSeparator": "."
},
"@schematics/angular:module": {
"typeSeparator": "."
},
"@schematics/angular:pipe": {
"typeSeparator": "."
},
"@schematics/angular:resolver": {
"typeSeparator": "."
}
}
}

View File

@@ -1,7 +1,7 @@
{
"/console-api":
{
"target": "http://[::1]:8080",
"target": "http://localhost:8080",
"secure": false,
"logLevel": "debug",
"changeOrigin": true

View File

@@ -21,7 +21,7 @@ module.exports = function (config) {
require('karma-chrome-launcher'),
require('karma-jasmine-html-reporter'),
require('karma-coverage'),
require('@angular-devkit/build-angular/plugins/karma')
],
client: {
jasmine: {

File diff suppressed because it is too large Load Diff

View File

@@ -16,29 +16,29 @@
},
"private": true,
"dependencies": {
"@angular/animations": "^19.1.4",
"@angular/cdk": "^19.1.2",
"@angular/common": "^19.1.4",
"@angular/compiler": "^19.1.4",
"@angular/core": "^19.1.4",
"@angular/forms": "^19.1.4",
"@angular/material": "^19.1.2",
"@angular/platform-browser": "^19.1.4",
"@angular/platform-browser-dynamic": "^19.1.4",
"@angular/router": "^19.1.4",
"@angular/animations": "^21.1.5",
"@angular/cdk": "^21.1.5",
"@angular/common": "^21.1.5",
"@angular/compiler": "^21.1.5",
"@angular/core": "^21.1.5",
"@angular/forms": "^21.1.5",
"@angular/material": "^21.1.5",
"@angular/platform-browser": "^21.1.5",
"@angular/platform-browser-dynamic": "^21.1.5",
"@angular/router": "^21.1.5",
"rxjs": "~7.5.0",
"tslib": "^2.3.0",
"zone.js": "~0.15.0"
},
"devDependencies": {
"@angular-devkit/build-angular": "^19.1.5",
"@angular-eslint/builder": "19.0.2",
"@angular-eslint/eslint-plugin": "19.0.2",
"@angular-eslint/eslint-plugin-template": "19.0.2",
"@angular-eslint/schematics": "19.0.2",
"@angular-eslint/template-parser": "19.0.2",
"@angular/cli": "~19.1.5",
"@angular/compiler-cli": "^19.1.4",
"@angular/build": "^21.1.4",
"@angular/cli": "~21.1.4",
"@angular/compiler-cli": "^21.1.5",
"@types/jasmine": "~4.0.0",
"@types/node": "^18.19.74",
"@typescript-eslint/eslint-plugin": "^7.2.0",
@@ -52,6 +52,6 @@
"karma-jasmine": "~5.1.0",
"karma-jasmine-html-reporter": "~2.0.0",
"prettier": "2.8.7",
"typescript": "^5.7.3"
"typescript": "^5.9.3"
}
}
}

View File

@@ -1,10 +1,9 @@
<div class="console-app mat-typography">
<app-header (toggleNavOpen)="toggleSidenav()"></app-header>
<div class="console-app__global-spinner">
<mat-progress-bar
mode="indeterminate"
*ngIf="globalLoader.isLoading"
></mat-progress-bar>
@if (globalLoader.isLoading) {
<mat-progress-bar mode="indeterminate"></mat-progress-bar>
}
</div>
<mat-sidenav-container class="console-app__container">
<mat-sidenav-content class="console-app__content-wrapper">

View File

@@ -143,13 +143,14 @@
<ng-container matColumnDef="domainName">
<mat-header-cell *matHeaderCellDef>Domain Name</mat-header-cell>
<mat-cell *matCellDef="let element">
@if (getOperationMessage(element.domainName)) {
<mat-icon
*ngIf="getOperationMessage(element.domainName)"
[matTooltip]="getOperationMessage(element.domainName)"
matTooltipPosition="above"
class="primary-text"
>info</mat-icon
>
}
<span>{{ element.domainName }}</span>
</mat-cell>
</ng-container>
@@ -209,9 +210,9 @@
<mat-row *matRowDef="let row; columns: displayedColumns"></mat-row>
<!-- Row shown when there is no matching data. -->
<mat-row *matNoDataRow>
<mat-cell colspan="6">No domains found</mat-cell>
</mat-row>
<tr class="mat-row" *matNoDataRow>
<td class="mat-cell" colspan="6">No domains found</td>
</tr>
</mat-table>
<mat-paginator
[length]="totalResults"

View File

@@ -33,6 +33,7 @@ import {
MatDialogRef,
} from '@angular/material/dialog';
import { RESTRICTED_ELEMENTS } from '../shared/directives/userLevelVisiblity.directive';
import { CdkColumnDef } from '@angular/cdk/table';
interface DomainResponse {
message: string;
@@ -114,6 +115,7 @@ export class ReasonDialogComponent {
templateUrl: './domainList.component.html',
styleUrls: ['./domainList.component.scss'],
standalone: false,
providers: [CdkColumnDef],
})
export class DomainListComponent {
public static PATH = 'domain-list';

View File

@@ -1,14 +1,15 @@
<p class="console-app__header">
<mat-toolbar>
@if (breakpointObserver.isMobileView()) {
<button
mat-icon-button
aria-label="Open navigation menu"
(click)="toggleNavPane()"
*ngIf="breakpointObserver.isMobileView()"
class="console-app__menu-btn"
>
<mat-icon>menu</mat-icon>
</button>
}
<a
[routerLink]="'/home'"
routerLinkActive="active"
@@ -65,7 +66,9 @@
</svg>
</a>
<span class="spacer"></span>
<app-registrar-selector *ngIf="!breakpointObserver.isMobileView()" />
@if (!breakpointObserver.isMobileView()) {
<app-registrar-selector />
}
<button
class="console-app__header-user-icon"
mat-mini-fab
@@ -79,5 +82,7 @@
<button mat-menu-item (click)="logOut()">Log out</button>
</mat-menu>
</mat-toolbar>
<app-registrar-selector *ngIf="breakpointObserver.isMobileView()" />
@if (breakpointObserver.isMobileView()) {
<app-registrar-selector />
}
</p>

View File

@@ -9,41 +9,36 @@
<mat-card>
<mat-card-content>
<mat-list role="list">
<ng-container *ngFor="let item of historyRecords; let last = last">
<mat-list-item class="history-list__item">
<mat-icon
[ngClass]="getIconClass(item.type)"
class="history-list__icon"
>
{{ getIconForType(item.type) }}
</mat-icon>
<div class="history-list__content">
<div class="history-list__description">
<span class="history-list__description--main">{{
item.type
}}</span>
<div>
<mat-chip
*ngIf="parseDescription(item.description).detail"
class="history-list__chip"
>
{{ parseDescription(item.description).detail }}
</mat-chip>
</div>
</div>
<div class="history-list__user">
<b>User - {{ item.actingUser.emailAddress }}</b>
@for (item of historyRecords; track item; let last = $last) {
<mat-list-item class="history-list__item">
<mat-icon
[ngClass]="getIconClass(item.type)"
class="history-list__icon"
>
{{ getIconForType(item.type) }}
</mat-icon>
<div class="history-list__content">
<div class="history-list__description">
<span class="history-list__description--main">{{ item.type }}</span>
<div>
@if (parseDescription(item.description).detail) {
<mat-chip class="history-list__chip">
{{ parseDescription(item.description).detail }}
</mat-chip>
}
</div>
</div>
<span class="history-list__timestamp">
{{ item.modificationTime | date : "MMM d, y, h:mm a" }}
</span>
</mat-list-item>
<mat-divider *ngIf="!last"></mat-divider>
</ng-container>
<div class="history-list__user">
<b>User - {{ item.actingUser.emailAddress }}</b>
</div>
</div>
<span class="history-list__timestamp">
{{ item.modificationTime | date : "MMM d, y, h:mm a" }}
</span>
</mat-list-item>
@if (!last) {
<mat-divider></mat-divider>
} }
</mat-list>
</mat-card-content>
</mat-card>

View File

@@ -17,6 +17,9 @@ import { ComponentFixture, TestBed } from '@angular/core/testing';
import { HomeComponent } from './home.component';
import { MaterialModule } from '../material.module';
import { AppModule } from '../app.module';
import { BackendService } from '../shared/services/backend.service';
import { provideHttpClient } from '@angular/common/http';
import { provideHttpClientTesting } from '@angular/common/http/testing';
describe('HomeComponent', () => {
let component: HomeComponent;
@@ -26,6 +29,11 @@ describe('HomeComponent', () => {
await TestBed.configureTestingModule({
imports: [MaterialModule, AppModule],
declarations: [HomeComponent],
providers: [
BackendService,
provideHttpClient(),
provideHttpClientTesting(),
],
}).compileComponents();
fixture = TestBed.createComponent(HomeComponent);

View File

@@ -12,9 +12,11 @@
[class.active]="router.url.includes(node.path)"
[elementId]="getElementId(node)"
>
<mat-icon class="console-app__nav-icon" *ngIf="node.iconName">
@if (node.iconName) {
<mat-icon class="console-app__nav-icon">
{{ node.iconName }}
</mat-icon>
}
{{ node.title }}
</mat-tree-node>
<mat-nested-tree-node
@@ -34,9 +36,11 @@
{{ treeControl.isExpanded(node) ? "expand_more" : "chevron_right" }}
</mat-icon>
</button>
<mat-icon class="console-app__nav-icon" *ngIf="node.iconName">
@if (node.iconName) {
<mat-icon class="console-app__nav-icon">
{{ node.iconName }}
</mat-icon>
}
{{ node.title }}
</div>
<div

View File

@@ -1,25 +1,28 @@
<h1 class="mat-headline-4">OT&E Status Check</h1>
@if(registrarId() === null) {
<h1>Missing registrarId param</h1>
} @else if(isOte()) {
<h1 *ngIf="oteStatusResponse().length">
} @else if(isOte()) { @if (oteStatusResponse().length) {
<h1>
Status:
<span>{{ oteStatusUnfinished().length ? "Unfinished" : "Completed" }}</span>
</h1>
}
<div class="console-app__ote-status">
@if(oteStatusCompleted().length) {
<div class="console-app__ote-status_completed">
<h1>Completed</h1>
<div *ngFor="let entry of oteStatusCompleted()">
<mat-icon>check_box</mat-icon>{{ entry.description }}
</div>
@for (entry of oteStatusCompleted(); track entry) {
<div><mat-icon>check_box</mat-icon>{{ entry.description }}</div>
}
</div>
} @if(oteStatusUnfinished().length) {
<div class="console-app__ote-status_unfinished">
<h1>Unfinished</h1>
<div *ngFor="let entry of oteStatusUnfinished()">
@for (entry of oteStatusUnfinished(); track entry) {
<div>
<mat-icon>check_box_outline_blank</mat-icon>{{ entry.description }}
</div>
}
</div>
}
</div>

View File

@@ -18,7 +18,7 @@ import { MatSnackBar } from '@angular/material/snack-bar';
import { RegistrarService } from '../registrar/registrar.service';
import { MaterialModule } from '../material.module';
import { SnackBarModule } from '../snackbar.module';
import { CommonModule } from '@angular/common';
import { ActivatedRoute, ParamMap } from '@angular/router';
import { take } from 'rxjs';
@@ -31,7 +31,7 @@ export interface OteStatusResponse {
@Component({
selector: 'app-ote-status',
imports: [MaterialModule, SnackBarModule, CommonModule],
imports: [MaterialModule, SnackBarModule],
templateUrl: './oteStatus.component.html',
styleUrls: ['./oteStatus.component.scss'],
})

View File

@@ -11,9 +11,8 @@
<mat-icon>arrow_back</mat-icon>
</button>
<div class="spacer"></div>
@if(!inEdit && !registrarNotFound) {
@if(!inEdit && !registrarNotFound) { @if (oteButtonVisible) {
<button
*ngIf="oteButtonVisible"
mat-stroked-button
(click)="checkOteStatus()"
aria-label="Check OT&E account status"
@@ -21,6 +20,7 @@
>
Check OT&E Status
</button>
}
<button
mat-flat-button
color="primary"
@@ -39,10 +39,11 @@
<h1>Registrar not found</h1>
} @else {
<h1>{{ registrarInEdit.registrarId }}</h1>
<h2 *ngIf="registrarInEdit.registrarName !== registrarInEdit.registrarId">
@if (registrarInEdit.registrarName !== registrarInEdit.registrarId) {
<h2>
{{ registrarInEdit.registrarName }}
</h2>
@if(inEdit) {
} @if(inEdit) {
<form (ngSubmit)="saveAndClose()">
<div>
<mat-form-field appearance="outline">
@@ -60,15 +61,14 @@
<mat-form-field appearance="outline">
<mat-label>Onboarded TLDs: </mat-label>
<mat-chip-grid #chipGrid aria-label="Enter TLD">
<mat-chip-row
*ngFor="let tld of registrarInEdit.allowedTlds"
(removed)="removeTLD(tld)"
>
@for (tld of registrarInEdit.allowedTlds; track tld) {
<mat-chip-row (removed)="removeTLD(tld)">
{{ tld }}
<button matChipRemove aria-label="'remove ' + tld">
<mat-icon>cancel</mat-icon>
</button>
</mat-chip-row>
}
</mat-chip-grid>
<input
placeholder="New tld..."

View File

@@ -5,15 +5,16 @@
<div class="console-app__registrars-header">
<h1 class="mat-headline-4" forceFocus>Registrars</h1>
<div class="spacer"></div>
@if (oteButtonVisible) {
<button
mat-stroked-button
*ngIf="oteButtonVisible"
(click)="createOteAccount()"
aria-label="Generate OT&E accounts"
[elementId]="getElementIdForOteBlock()"
>
Create OT&E accounts
</button>
}
<button
class="console-app__registrars-new"
mat-flat-button
@@ -43,10 +44,8 @@
class="console-app__registrars-table"
matSort
>
<ng-container
*ngFor="let column of columns"
[matColumnDef]="column.columnDef"
>
@for (column of columns; track column) {
<ng-container [matColumnDef]="column.columnDef">
<mat-header-cell *matHeaderCellDef>
{{ column.header }}
</mat-header-cell>
@@ -55,6 +54,7 @@
[innerHTML]="column.cell(row)"
></mat-cell>
</ng-container>
}
<mat-header-row *matHeaderRowDef="displayedColumns"></mat-header-row>
<mat-row
*matRowDef="let row; columns: displayedColumns"

View File

@@ -22,13 +22,12 @@
</div>
} @else {
<mat-table [dataSource]="dataSource" class="mat-elevation-z0">
<ng-container
*ngFor="let column of columns"
[matColumnDef]="column.columnDef"
>
@for (column of columns; track column) {
<ng-container [matColumnDef]="column.columnDef">
<mat-header-cell *matHeaderCellDef> {{ column.header }} </mat-header-cell>
<mat-cell *matCellDef="let row" [innerHTML]="column.cell(row)"></mat-cell>
</ng-container>
}
<mat-header-row *matHeaderRowDef="displayedColumns"></mat-header-row>
<mat-row
*matRowDef="let row; columns: displayedColumns"

View File

@@ -1,9 +1,5 @@
<div
class="console-app__contact"
*ngIf="contactService.contactInEdit"
cdkTrapFocus
[cdkTrapFocusAutoCapture]="true"
>
@if (contactService.contactInEdit) {
<div class="console-app__contact" cdkTrapFocus [cdkTrapFocusAutoCapture]="true">
<div class="console-app__contact-controls">
<button
mat-icon-button
@@ -32,7 +28,6 @@
</button>
}
</div>
@if(isEditing || contactService.isContactNewView) {
<h1>Contact Details</h1>
<form (ngSubmit)="save($event)">
@@ -46,7 +41,6 @@
[ngModelOptions]="{ standalone: true }"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Primary account email: </mat-label>
<input
@@ -64,7 +58,6 @@
"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Phone: </mat-label>
<input
@@ -74,7 +67,6 @@
placeholder="+0.0000000000"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Fax: </mat-label>
<input
@@ -84,7 +76,6 @@
/>
</mat-form-field>
</section>
<section>
<h1>Contact Type</h1>
<p class="console-app__contact-required">
@@ -92,21 +83,18 @@
(primary contact can't be updated)
</p>
<div class="">
<ng-container
*ngFor="let contactType of contactTypeToTextMap | keyvalue"
@for (contactType of contactTypeToTextMap | keyvalue; track contactType)
{ @if (shouldDisplayCheckbox(contactType.key)) {
<mat-checkbox
[checked]="checkboxIsChecked(contactType.key)"
(change)="checkboxOnChange($event, contactType.key)"
[disabled]="checkboxIsDisabled(contactType.key)"
>
<mat-checkbox
*ngIf="shouldDisplayCheckbox(contactType.key)"
[checked]="checkboxIsChecked(contactType.key)"
(change)="checkboxOnChange($event, contactType.key)"
[disabled]="checkboxIsDisabled(contactType.key)"
>
{{ contactType.value }}
</mat-checkbox>
</ng-container>
{{ contactType.value }}
</mat-checkbox>
} }
</div>
</section>
<section>
<h1>RDAP Preferences</h1>
<div>
@@ -116,7 +104,6 @@
>Show in Registrar RDAP record as admin contact</mat-checkbox
>
</div>
<div>
<mat-checkbox
[(ngModel)]="contactService.contactInEdit.visibleInRdapAsTech"
@@ -124,7 +111,6 @@
>Show in Registrar RDAP record as technical contact</mat-checkbox
>
</div>
<div>
<mat-checkbox
[(ngModel)]="contactService.contactInEdit.visibleInDomainRdapAsAbuse"
@@ -198,15 +184,13 @@
</mat-list-item>
} @if(contactService.contactInEdit.visibleInRdapAsTech) {
<mat-divider></mat-divider>
<mat-list-item
role="listitem"
*ngIf="contactService.contactInEdit.visibleInRdapAsTech"
>
@if (contactService.contactInEdit.visibleInRdapAsTech) {
<mat-list-item role="listitem">
<span class="console-app__list-value"
>Show in Registrar RDAP record as technical contact</span
>
</mat-list-item>
} @if(contactService.contactInEdit.visibleInDomainRdapAsAbuse) {
} } @if(contactService.contactInEdit.visibleInDomainRdapAsAbuse) {
<mat-divider></mat-divider>
<mat-list-item role="listitem">
<span class="console-app__list-value"
@@ -220,3 +204,4 @@
</mat-card>
}
</div>
}

View File

@@ -1,6 +1,6 @@
@if (registrarInEdit) {
<div
class="console-app__rdap-edit"
*ngIf="registrarInEdit"
cdkTrapFocus
[cdkTrapFocusAutoCapture]="true"
>
@@ -12,7 +12,6 @@
>
<mat-icon>arrow_back</mat-icon>
</button>
<div class="console-app__rdap-edit-controls">
<span>
General registrar information for your RDAP record. This information is
@@ -20,10 +19,8 @@
</span>
<div class="spacer"></div>
</div>
<div class="console-app__rdap-edit">
<h1>Personal info</h1>
<form (ngSubmit)="save($event)">
<mat-form-field appearance="outline">
<mat-label>Email: </mat-label>
@@ -34,7 +31,6 @@
[ngModelOptions]="{ standalone: true }"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Phone: </mat-label>
<input
@@ -44,7 +40,6 @@
[ngModelOptions]="{ standalone: true }"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Fax: </mat-label>
<input
@@ -54,7 +49,6 @@
[ngModelOptions]="{ standalone: true }"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Street Address (line 1): </mat-label>
<input
@@ -64,7 +58,6 @@
[(ngModel)]="registrarInEdit.localizedAddress.street![0]"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Street Address (line 2): </mat-label>
<input
@@ -74,7 +67,6 @@
[(ngModel)]="registrarInEdit.localizedAddress.street![1]"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>City: </mat-label>
<input
@@ -84,7 +76,6 @@
[(ngModel)]="(registrarInEdit.localizedAddress || {}).city"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>State or Province: </mat-label>
<input
@@ -94,7 +85,6 @@
[(ngModel)]="(registrarInEdit.localizedAddress || {}).state"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Country: </mat-label>
<input
@@ -104,7 +94,6 @@
[(ngModel)]="(registrarInEdit.localizedAddress || {}).countryCode"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Postal code: </mat-label>
<input
@@ -114,7 +103,6 @@
[(ngModel)]="(registrarInEdit.localizedAddress || {}).zip"
/>
</mat-form-field>
<button
mat-flat-button
color="primary"
@@ -126,3 +114,4 @@
</form>
</div>
</div>
}

View File

@@ -21,7 +21,6 @@
[formGroup]="passwordUpdateForm"
(submitResults)="save($event)"
/>
@if(userDataService.userData()?.isAdmin) {
<div class="settings-security__reset-password-field">
<h2>Need to reset your EPP password?</h2>
<button
@@ -33,5 +32,4 @@
Reset EPP password via email
</button>
</div>
}
</div>

View File

@@ -20,7 +20,7 @@ import { RegistrarService } from 'src/app/registrar/registrar.service';
import { SecurityService } from './security.service';
import { UserDataService } from 'src/app/shared/services/userData.service';
import { MatDialog, MatDialogRef } from '@angular/material/dialog';
import { CommonModule } from '@angular/common';
import { MaterialModule } from 'src/app/material.module';
import { filter, switchMap, take } from 'rxjs';
import { BackendService } from 'src/app/shared/services/backend.service';
@@ -41,7 +41,7 @@ import {
<button mat-button color="warn" (click)="onSave()">Confirm</button>
</mat-dialog-actions>
`,
imports: [CommonModule, MaterialModule],
imports: [MaterialModule],
})
export class ResetEppPasswordComponent {
constructor(public dialogRef: MatDialogRef<ResetEppPasswordComponent>) {}

View File

@@ -68,7 +68,7 @@
>
</mat-list-item>
<mat-divider></mat-divider>
@for (item of dataSource.ipAddressAllowList; track item.value) {
@for (item of dataSource.ipAddressAllowList; track $index){
<mat-list-item role="listitem">
<span class="console-app__list-value">{{ item.value }}</span>
</mat-list-item>

View File

@@ -12,7 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { ComponentFixture, TestBed, waitForAsync } from '@angular/core/testing';
import {
ComponentFixture,
fakeAsync,
TestBed,
tick,
waitForAsync,
} from '@angular/core/testing';
import { provideHttpClient } from '@angular/common/http';
import { provideHttpClientTesting } from '@angular/common/http/testing';
@@ -30,43 +36,32 @@ import { MOCK_REGISTRAR_SERVICE } from 'src/testdata/registrar/registrar.service
describe('SecurityComponent', () => {
let component: SecurityComponent;
let fixture: ComponentFixture<SecurityComponent>;
let fetchSecurityDetailsSpy: Function;
let saveSpy: Function;
let securityServiceStub: Partial<SecurityService>;
beforeEach(async () => {
const securityServiceSpy = jasmine.createSpyObj(SecurityService, [
'fetchSecurityDetails',
'saveChanges',
]);
fetchSecurityDetailsSpy =
securityServiceSpy.fetchSecurityDetails.and.returnValue(of());
saveSpy = securityServiceSpy.saveChanges.and.returnValue(of());
securityServiceStub = {
isEditingSecurity: false,
isEditingPassword: false,
saveChanges: jasmine.createSpy('saveChanges').and.returnValue(of({})),
};
await TestBed.configureTestingModule({
declarations: [SecurityEditComponent, SecurityComponent],
imports: [MaterialModule, BrowserAnimationsModule, FormsModule],
providers: [
BackendService,
SecurityService,
{ provide: SecurityService, useValue: securityServiceStub },
{ provide: RegistrarService, useValue: MOCK_REGISTRAR_SERVICE },
provideHttpClient(),
provideHttpClientTesting(),
],
})
.overrideComponent(SecurityComponent, {
set: {
providers: [
{ provide: SecurityService, useValue: securityServiceSpy },
],
},
})
.compileComponents();
}).compileComponents();
saveSpy = securityServiceStub.saveChanges as jasmine.Spy;
fixture = TestBed.createComponent(SecurityComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
@@ -93,17 +88,36 @@ describe('SecurityComponent', () => {
});
}));
it('should remove ip', waitForAsync(() => {
component.dataSource.ipAddressAllowList =
component.dataSource.ipAddressAllowList?.splice(1);
fixture.whenStable().then(() => {
fixture.detectChanges();
let listElems: Array<HTMLElement> = Array.from(
fixture.nativeElement.querySelectorAll('span.console-app__list-value')
);
expect(listElems.map((e) => e.textContent)).toContain(
'No IP addresses on file.'
);
it('should remove ip', fakeAsync(() => {
fixture.detectChanges();
tick();
const editBtn = fixture.nativeElement.querySelector(
'button[aria-label="Edit security settings"]'
);
editBtn.click();
tick();
fixture.detectChanges();
const removeIpBtn = fixture.nativeElement.querySelector(
'.console-app__removeIp'
);
removeIpBtn.click();
tick();
fixture.detectChanges();
const saveBtn = fixture.nativeElement.querySelector(
'.settings-security__edit-save'
);
saveBtn.click();
tick();
fixture.detectChanges();
expect(saveSpy).toHaveBeenCalledWith({
ipAddressAllowList: [],
});
}));
@@ -119,21 +133,34 @@ describe('SecurityComponent', () => {
expect(component.securityService.isEditingPassword).toBeTrue();
});
it('should call save', waitForAsync(async () => {
component.editSecurity();
await fixture.whenStable();
it('should call save', fakeAsync(() => {
fixture.detectChanges();
tick();
const editBtn = fixture.nativeElement.querySelector(
'button[aria-label="Edit security settings"]'
);
editBtn.click();
tick();
fixture.detectChanges();
const el = fixture.nativeElement.querySelector(
'.console-app__clientCertificateValue'
);
el.value = 'test';
el.dispatchEvent(new Event('input'));
tick();
fixture.detectChanges();
await fixture.whenStable();
fixture.nativeElement
.querySelector('.settings-security__edit-save')
.click();
expect(saveSpy).toHaveBeenCalledOnceWith({
const saveBtn = fixture.nativeElement.querySelector(
'.settings-security__edit-save'
);
saveBtn.click();
tick();
expect(saveSpy).toHaveBeenCalledWith({
ipAddressAllowList: [{ value: '123.123.123.123' }],
clientCertificate: 'test',
});

View File

@@ -23,9 +23,11 @@
<button
matSuffix
mat-icon-button
class="console-app__removeIp"
[attr.aria-label]="'Remove IP entry ' + ip.value"
(click)="removeIpEntry(ip)"
[disabled]="isUpdating"
type="button"
>
<mat-icon>close</mat-icon>
</button>

View File

@@ -14,9 +14,9 @@
required
autocomplete="current-password"
/>
<mat-error *ngIf="hasError('oldPassword') as errorText">{{
errorText
}}</mat-error>
@if (hasError('oldPassword'); as errorText) {
<mat-error>{{ errorText }}</mat-error>
}
</mat-form-field>
</div>
}
@@ -30,9 +30,9 @@
required
autocomplete="new-password"
/>
<mat-error *ngIf="hasError('newPassword') as errorText">{{
errorText
}}</mat-error>
@if (hasError('newPassword'); as errorText) {
<mat-error>{{ errorText }}</mat-error>
}
</mat-form-field>
</div>
<div class="console-app__password-input-form-field">
@@ -45,9 +45,9 @@
required
autocomplete="new-password"
/>
<mat-error *ngIf="hasError('newPasswordRepeat') as errorText">{{
errorText
}}</mat-error>
@if (hasError('newPasswordRepeat'); as errorText) {
<mat-error>{{ errorText }}</mat-error>
}
</mat-form-field>
</div>
<button

View File

@@ -29,10 +29,9 @@ export const DISABLED_ELEMENTS_PER_ROLE = {
RESTRICTED_ELEMENTS.REGISTRAR_ELEMENT,
RESTRICTED_ELEMENTS.OTE,
RESTRICTED_ELEMENTS.SUSPEND,
RESTRICTED_ELEMENTS.ACTIVITY_PER_USER,
],
SUPPORT_LEAD: [],
SUPPORT_AGENT: [RESTRICTED_ELEMENTS.ACTIVITY_PER_USER],
SUPPORT_AGENT: [],
};
@Directive({

View File

@@ -50,15 +50,17 @@
<mat-icon>delete</mat-icon>
</button>
</div>
<div *ngIf="isNewUser" class="console-app__user-details-save-password">
@if (isNewUser) {
<div class="console-app__user-details-save-password">
<mat-icon>priority_high</mat-icon>
Please save the password. For your security, we do not store passwords in a
recoverable format.
</div>
<p *ngIf="isLoading">
} @if (isLoading) {
<p>
<mat-progress-bar mode="query"></mat-progress-bar>
</p>
}
<mat-card appearance="outlined">
<mat-card-content>

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { CommonModule } from '@angular/common';
import { Component, computed } from '@angular/core';
import { MatSnackBar } from '@angular/material/snack-bar';
import { SelectedRegistrarModule } from '../app.module';
@@ -31,7 +30,6 @@ import { UserEditFormComponent } from './userEditForm.component';
FormsModule,
MaterialModule,
SnackBarModule,
CommonModule,
SelectedRegistrarModule,
UserEditFormComponent,
],

View File

@@ -1,6 +1,7 @@
<div class="console-app__user-edit">
<form (ngSubmit)="saveEdit($event)" #form>
<p *ngIf="isNew()">
@if (isNew()) {
<p>
<mat-form-field appearance="outline">
<mat-label
>User name prefix:
@@ -19,6 +20,7 @@
/>
</mat-form-field>
</p>
}
<p>
<mat-form-field appearance="outline">
<mat-label
@@ -44,7 +46,6 @@
Save
</button>
</form>
@if(userDataService.userData()?.isAdmin) {
<button
mat-flat-button
color="primary"
@@ -53,5 +54,4 @@
>
Reset registry lock password
</button>
}
</div>

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { CommonModule } from '@angular/common';
import {
Component,
ElementRef,
@@ -50,7 +49,7 @@ import { HttpErrorResponse } from '@angular/common/http';
<button mat-button color="warn" (click)="onSave()">Confirm</button>
</mat-dialog-actions>
`,
imports: [CommonModule, MaterialModule],
imports: [MaterialModule],
})
export class ResetRegistryLockPasswordComponent {
constructor(
@@ -72,7 +71,7 @@ export class ResetRegistryLockPasswordComponent {
selector: 'app-user-edit-form',
templateUrl: './userEditForm.component.html',
styleUrls: ['./userEditForm.component.scss'],
imports: [FormsModule, MaterialModule, CommonModule],
imports: [FormsModule, MaterialModule],
providers: [],
})
export class UserEditFormComponent {

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { CommonModule } from '@angular/common';
import { HttpErrorResponse } from '@angular/common/http';
import { Component, effect } from '@angular/core';
import { MatSnackBar } from '@angular/material/snack-bar';
@@ -35,7 +34,6 @@ import { UserEditFormComponent } from './userEditForm.component';
FormsModule,
MaterialModule,
SnackBarModule,
CommonModule,
SelectedRegistrarModule,
UsersListComponent,
UserEditFormComponent,

View File

@@ -5,15 +5,14 @@
class="console-app__users-table"
matSort
>
<ng-container
*ngFor="let column of columns"
[matColumnDef]="column.columnDef"
>
@for (column of columns; track column) {
<ng-container [matColumnDef]="column.columnDef">
<mat-header-cell *matHeaderCellDef>
{{ column.header }}
</mat-header-cell>
<mat-cell *matCellDef="let row" [innerHTML]="column.cell(row)"></mat-cell>
</ng-container>
}
<mat-header-row *matHeaderRowDef="displayedColumns"></mat-header-row>
<mat-row
*matRowDef="let row; columns: displayedColumns"

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { CommonModule } from '@angular/common';
import {
Component,
effect,
@@ -43,7 +42,7 @@ export const columns = [
selector: 'app-users-list',
templateUrl: './usersList.component.html',
styleUrls: ['./usersList.component.scss'],
imports: [MaterialModule, CommonModule],
imports: [MaterialModule],
providers: [],
})
export class UsersListComponent {

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { enableProdMode } from '@angular/core';
import { enableProdMode, provideZoneChangeDetection } from '@angular/core';
import { platformBrowserDynamic } from '@angular/platform-browser-dynamic';
import { AppModule } from './app/app.module';
@@ -23,5 +23,7 @@ if (environment.production || environment.sandbox) {
}
platformBrowserDynamic()
.bootstrapModule(AppModule)
.bootstrapModule(AppModule, {
applicationProviders: [provideZoneChangeDetection()],
})
.catch((err) => console.error(err));

View File

@@ -14,14 +14,10 @@
"sourceMap": true,
"declaration": false,
"experimentalDecorators": true,
"moduleResolution": "node",
"moduleResolution": "bundler",
"importHelpers": true,
"target": "ES2022",
"module": "es2020",
"lib": [
"es2020",
"dom"
],
"useDefineForClassFields": false
},
"angularCompilerOptions": {

View File

@@ -76,7 +76,7 @@ import google.registry.model.reporting.HistoryEntry.HistoryEntryId;
import google.registry.model.reporting.IcannReportingTypes.ActivityReportField;
import google.registry.model.tld.Tld;
import google.registry.model.transfer.DomainTransferData;
import google.registry.model.transfer.TransferData.TransferServerApproveEntity;
import google.registry.model.transfer.DomainTransferData.TransferServerApproveEntity;
import google.registry.model.transfer.TransferResponse.DomainTransferResponse;
import google.registry.model.transfer.TransferStatus;
import jakarta.inject.Inject;

View File

@@ -36,8 +36,7 @@ import google.registry.model.poll.PollMessage;
import google.registry.model.reporting.HistoryEntry.HistoryEntryId;
import google.registry.model.tld.Tld;
import google.registry.model.transfer.DomainTransferData;
import google.registry.model.transfer.TransferData;
import google.registry.model.transfer.TransferData.TransferServerApproveEntity;
import google.registry.model.transfer.DomainTransferData.TransferServerApproveEntity;
import google.registry.model.transfer.TransferResponse.DomainTransferResponse;
import google.registry.model.transfer.TransferStatus;
import google.registry.persistence.VKey;
@@ -51,7 +50,7 @@ import org.joda.time.DateTime;
*/
public final class DomainTransferUtils {
/** Sets up {@link TransferData} for a domain with links to entities for server approval. */
/** Sets up {@link DomainTransferData} for a domain with links to entities for server approval. */
public static DomainTransferData createPendingTransferData(
String domainRepoId,
Long historyId,
@@ -179,7 +178,7 @@ public final class DomainTransferUtils {
/** Create a poll message for the gaining client in a transfer. */
public static PollMessage createGainingTransferPollMessage(
String targetId,
TransferData transferData,
DomainTransferData transferData,
@Nullable DateTime extendedRegistrationExpirationTime,
DateTime now,
HistoryEntryId domainHistoryId) {
@@ -202,7 +201,7 @@ public final class DomainTransferUtils {
/** Create a poll message for the losing client in a transfer. */
public static PollMessage createLosingTransferPollMessage(
String targetId,
TransferData transferData,
DomainTransferData transferData,
@Nullable DateTime extendedRegistrationExpirationTime,
HistoryEntryId domainHistoryId) {
return new PollMessage.OneTime.Builder()
@@ -216,10 +215,10 @@ public final class DomainTransferUtils {
.build();
}
/** Create a {@link DomainTransferResponse} off of the info in a {@link TransferData}. */
/** Create a {@link DomainTransferResponse} off of the info in a {@link DomainTransferData}. */
static DomainTransferResponse createTransferResponse(
String targetId,
TransferData transferData,
DomainTransferData transferData,
@Nullable DateTime extendedRegistrationExpirationTime) {
return new DomainTransferResponse.Builder()
.setDomainName(targetId)

View File

@@ -34,7 +34,6 @@ import com.google.common.collect.ImmutableSet;
import com.google.gson.annotations.Expose;
import google.registry.config.RegistryConfig;
import google.registry.model.eppcommon.StatusValue;
import google.registry.model.transfer.TransferData;
import google.registry.persistence.VKey;
import google.registry.util.NonFinalForTesting;
import jakarta.persistence.Access;
@@ -207,27 +206,6 @@ public abstract class EppResource extends UpdateAutoTimestampEntity implements B
/** EppResources that are loaded via foreign keys should implement this marker interface. */
public interface ForeignKeyedEppResource {}
/** An interface for resources that have transfer data. */
public interface ResourceWithTransferData<T extends TransferData> {
T getTransferData();
/**
* The time that this resource was last transferred.
*
* <p>Can be null if the resource has never been transferred.
*/
DateTime getLastTransferTime();
}
/** An interface for builders of resources that have transfer data. */
public interface BuilderWithTransferData<
T extends TransferData, B extends BuilderWithTransferData<T, B>> {
B setTransferData(T transferData);
/** Set the time when this resource was transferred. */
B setLastTransferTime(DateTime lastTransferTime);
}
/** Abstract builder for {@link EppResource} types. */
public abstract static class Builder<T extends EppResource, B extends Builder<T, B>>
extends GenericBuilder<T, B> {

View File

@@ -29,7 +29,6 @@ import google.registry.model.poll.PendingActionNotificationResponse.DomainPendin
import google.registry.model.poll.PollMessage;
import google.registry.model.reporting.HistoryEntry;
import google.registry.model.transfer.DomainTransferData;
import google.registry.model.transfer.TransferData;
import google.registry.model.transfer.TransferResponse;
import google.registry.model.transfer.TransferResponse.DomainTransferResponse;
import google.registry.model.transfer.TransferStatus;
@@ -79,7 +78,7 @@ public final class ResourceTransferUtils {
if (!domain.getStatusValues().contains(StatusValue.PENDING_TRANSFER)) {
return;
}
TransferData oldTransferData = domain.getTransferData();
DomainTransferData oldTransferData = domain.getTransferData();
tm().delete(oldTransferData.getServerApproveEntities());
tm().put(
new PollMessage.OneTime.Builder()
@@ -99,8 +98,8 @@ public final class ResourceTransferUtils {
* Turn a domain into a builder with its pending transfer resolved.
*
* <p>This removes the {@link StatusValue#PENDING_TRANSFER} status, sets the {@link
* TransferStatus}, clears all the server-approve fields on the {@link TransferData}, and sets the
* expiration time of the last pending transfer to now.
* TransferStatus}, clears all the server-approve fields on the {@link DomainTransferData}, and
* sets the expiration time of the last pending transfer to now.
*/
private static Domain.Builder resolvePendingTransfer(
Domain domain, TransferStatus transferStatus, DateTime now) {
@@ -125,9 +124,9 @@ public final class ResourceTransferUtils {
* Resolve a pending transfer by awarding it to the gaining client.
*
* <p>This removes the {@link StatusValue#PENDING_TRANSFER} status, sets the {@link
* TransferStatus}, clears all the server-approve fields on the {@link TransferData}, sets the new
* client id, and sets the last transfer time and the expiration time of the last pending transfer
* to now.
* TransferStatus}, clears all the server-approve fields on the {@link DomainTransferData}, sets
* the new client id, and sets the last transfer time and the expiration time of the last pending
* transfer to now.
*/
public static Domain approvePendingTransfer(
Domain domain, TransferStatus transferStatus, DateTime now) {
@@ -143,9 +142,9 @@ public final class ResourceTransferUtils {
* Resolve a pending transfer by denying it.
*
* <p>This removes the {@link StatusValue#PENDING_TRANSFER} status, sets the {@link
* TransferStatus}, clears all the server-approve fields on the {@link TransferData}, sets the
* expiration time of the last pending transfer to now, sets the last EPP update time to now, and
* sets the last EPP update client id to the given client id.
* TransferStatus}, clears all the server-approve fields on the {@link DomainTransferData}, sets
* the expiration time of the last pending transfer to now, sets the last EPP update time to now,
* and sets the last EPP update client id to the given client id.
*/
public static Domain denyPendingTransfer(
Domain domain, TransferStatus transferStatus, DateTime now, String lastEppUpdateRegistrarId) {

View File

@@ -26,7 +26,7 @@ import google.registry.model.UnsafeSerializable;
import google.registry.model.annotations.IdAllocation;
import google.registry.model.domain.DomainHistory;
import google.registry.model.reporting.HistoryEntry.HistoryEntryId;
import google.registry.model.transfer.TransferData.TransferServerApproveEntity;
import google.registry.model.transfer.DomainTransferData.TransferServerApproveEntity;
import google.registry.persistence.VKey;
import jakarta.persistence.Column;
import jakarta.persistence.EnumType;

View File

@@ -43,7 +43,6 @@ import com.google.common.collect.Sets;
import com.google.gson.annotations.Expose;
import google.registry.flows.ResourceFlowUtils;
import google.registry.model.EppResource;
import google.registry.model.EppResource.ResourceWithTransferData;
import google.registry.model.billing.BillingRecurrence;
import google.registry.model.domain.launch.LaunchNotice;
import google.registry.model.domain.rgp.GracePeriodStatus;
@@ -96,8 +95,7 @@ import org.joda.time.Interval;
@MappedSuperclass
@Embeddable
@Access(AccessType.FIELD)
public class DomainBase extends EppResource
implements ResourceWithTransferData<DomainTransferData> {
public class DomainBase extends EppResource {
/** The max number of years that a domain can be registered for, as set by ICANN policy. */
public static final int MAX_REGISTRATION_YEARS = 10;
@@ -319,12 +317,10 @@ public class DomainBase extends EppResource
return Optional.ofNullable(autorenewEndTime.equals(END_OF_TIME) ? null : autorenewEndTime);
}
@Override
public DomainTransferData getTransferData() {
return Optional.ofNullable(transferData).orElse(DomainTransferData.EMPTY);
}
@Override
public DateTime getLastTransferTime() {
return lastTransferTime;
}
@@ -605,7 +601,7 @@ public class DomainBase extends EppResource
/** A builder for constructing {@link Domain}, since it is immutable. */
public static class Builder<T extends DomainBase, B extends Builder<T, B>>
extends EppResource.Builder<T, B> implements BuilderWithTransferData<DomainTransferData, B> {
extends EppResource.Builder<T, B> {
public Builder() {}
@@ -783,13 +779,11 @@ public class DomainBase extends EppResource
return thisCastToDerived();
}
@Override
public B setTransferData(DomainTransferData transferData) {
getInstance().transferData = transferData;
return thisCastToDerived();
}
@Override
public B setLastTransferTime(DateTime lastTransferTime) {
getInstance().lastTransferTime = lastTransferTime;
return thisCastToDerived();

View File

@@ -123,8 +123,7 @@ public class EppXmlTransformer {
public static byte[] marshal(EppOutput root, ValidationMode validation) throws XmlException {
byte[] bytes = marshal(OUTPUT_TRANSFORMER, root, validation);
if (!RegistryEnvironment.PRODUCTION.equals(RegistryEnvironment.get())
&& hasFeeExtension(root)) {
if (hasFeeExtension(root)) {
return FeeExtensionXmlTagNormalizer.normalize(new String(bytes, UTF_8)).getBytes(UTF_8);
}
return bytes;

View File

@@ -24,7 +24,6 @@ import static google.registry.util.DomainNameUtils.canonicalizeHostname;
import com.google.common.collect.ImmutableSet;
import google.registry.model.EppResource;
import google.registry.model.domain.Domain;
import google.registry.model.transfer.TransferData;
import google.registry.persistence.VKey;
import google.registry.persistence.converter.InetAddressSetUserType;
import jakarta.persistence.Access;
@@ -41,8 +40,8 @@ import org.joda.time.DateTime;
/**
* A persistable Host resource including mutable and non-mutable fields.
*
* <p>A host's {@link TransferData} is stored on the superordinate domain. Non-subordinate hosts
* don't carry a full set of TransferData; all they have is lastTransferTime.
* <p>A host's full transfer data is stored on the superordinate domain. Non-subordinate hosts don't
* carry a full set of TransferData; all they have is lastTransferTime.
*
* <p>This class deliberately does not include an {@link jakarta.persistence.Id} so that any
* foreign-keyed fields can refer to the proper parent entity's ID, whether we're storing this in

View File

@@ -32,14 +32,12 @@ import google.registry.model.domain.DomainRenewData;
import google.registry.model.eppoutput.EppResponse.ResponseData;
import google.registry.model.host.Host;
import google.registry.model.host.HostHistory;
import google.registry.model.poll.PendingActionNotificationResponse.ContactPendingActionNotificationResponse;
import google.registry.model.poll.PendingActionNotificationResponse.DomainPendingActionNotificationResponse;
import google.registry.model.poll.PendingActionNotificationResponse.HostPendingActionNotificationResponse;
import google.registry.model.reporting.HistoryEntry;
import google.registry.model.reporting.HistoryEntry.HistoryEntryId;
import google.registry.model.transfer.TransferData.TransferServerApproveEntity;
import google.registry.model.transfer.DomainTransferData.TransferServerApproveEntity;
import google.registry.model.transfer.TransferResponse;
import google.registry.model.transfer.TransferResponse.ContactTransferResponse;
import google.registry.model.transfer.TransferResponse.DomainTransferResponse;
import google.registry.persistence.VKey;
import google.registry.persistence.WithVKey;
@@ -406,12 +404,8 @@ public abstract class PollMessage extends ImmutableObject
if (pendingActionNotificationResponse != null) {
// Promote the pending action notification response to its specialized type.
if (contactId != null) {
pendingActionNotificationResponse =
ContactPendingActionNotificationResponse.create(
pendingActionNotificationResponse.nameOrId.value,
pendingActionNotificationResponse.getActionResult(),
pendingActionNotificationResponse.getTrid(),
pendingActionNotificationResponse.processedDate);
// Contacts are no longer supported
pendingActionNotificationResponse = null;
} else if (domainName != null) {
pendingActionNotificationResponse =
DomainPendingActionNotificationResponse.create(
@@ -432,16 +426,8 @@ public abstract class PollMessage extends ImmutableObject
// The transferResponse is currently an unspecialized TransferResponse instance, create the
// appropriate subclass so that the value is consistently specialized
if (contactId != null) {
transferResponse =
new ContactTransferResponse.Builder()
.setContactId(contactId)
.setGainingRegistrarId(transferResponse.getGainingRegistrarId())
.setLosingRegistrarId(transferResponse.getLosingRegistrarId())
.setTransferStatus(transferResponse.getTransferStatus())
.setTransferRequestTime(transferResponse.getTransferRequestTime())
.setPendingTransferExpirationTime(
transferResponse.getPendingTransferExpirationTime())
.build();
// Contacts are no longer supported
transferResponse = null;
} else if (domainName != null) {
transferResponse =
new DomainTransferResponse.Builder()
@@ -488,9 +474,6 @@ public abstract class PollMessage extends ImmutableObject
// Set identifier fields based on the type of the notification response.
if (instance.pendingActionNotificationResponse
instanceof ContactPendingActionNotificationResponse) {
instance.contactId = instance.pendingActionNotificationResponse.nameOrId.value;
} else if (instance.pendingActionNotificationResponse
instanceof DomainPendingActionNotificationResponse) {
instance.domainName = instance.pendingActionNotificationResponse.nameOrId.value;
} else if (instance.pendingActionNotificationResponse
@@ -507,9 +490,7 @@ public abstract class PollMessage extends ImmutableObject
.orElse(null);
// Set the identifier according to the TransferResponse type.
if (instance.transferResponse instanceof ContactTransferResponse) {
instance.contactId = ((ContactTransferResponse) instance.transferResponse).getContactId();
} else if (instance.transferResponse instanceof DomainTransferResponse response) {
if (instance.transferResponse instanceof DomainTransferResponse response) {
instance.domainName = response.getDomainName();
instance.extendedRegistrationExpirationTime =
response.getExtendedRegistrationExpirationTime();

View File

@@ -25,7 +25,7 @@ import jakarta.xml.bind.annotation.XmlElement;
import jakarta.xml.bind.annotation.XmlTransient;
import org.joda.time.DateTime;
/** Fields common to {@link TransferData} and {@link TransferResponse}. */
/** Fields common to {@link DomainTransferData} and {@link TransferResponse}. */
@XmlTransient
@MappedSuperclass
public abstract class BaseTransferObject extends ImmutableObject implements UnsafeSerializable {

View File

@@ -1,48 +0,0 @@
// Copyright 2020 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.model.transfer;
import jakarta.persistence.Embeddable;
/** Transfer data for contact. */
@Embeddable
public class ContactTransferData extends TransferData {
public static final ContactTransferData EMPTY = new ContactTransferData();
@Override
public boolean isEmpty() {
return EMPTY.equals(this);
}
@Override
protected Builder createEmptyBuilder() {
return new Builder();
}
@Override
public Builder asBuilder() {
return new Builder(clone(this));
}
public static class Builder extends TransferData.Builder<ContactTransferData, Builder> {
/** Create a {@link Builder} wrapping a new instance. */
public Builder() {}
/** Create a {@link Builder} wrapping the given instance. */
private Builder(ContactTransferData instance) {
super(instance);
}
}
}

View File

@@ -14,14 +14,20 @@
package google.registry.model.transfer;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static google.registry.util.CollectionUtils.isNullOrEmpty;
import static google.registry.util.CollectionUtils.nullToEmpty;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import google.registry.model.Buildable;
import google.registry.model.billing.BillingCancellation;
import google.registry.model.billing.BillingEvent;
import google.registry.model.billing.BillingRecurrence;
import google.registry.model.domain.Period;
import google.registry.model.domain.Period.Unit;
import google.registry.model.eppcommon.Trid;
import google.registry.model.poll.PollMessage;
import google.registry.persistence.VKey;
import google.registry.util.NullIgnoringCollectionBuilder;
@@ -36,9 +42,41 @@ import org.joda.time.DateTime;
/** Transfer data for domain. */
@Embeddable
public class DomainTransferData extends TransferData {
public class DomainTransferData extends BaseTransferObject implements Buildable {
public static final DomainTransferData EMPTY = new DomainTransferData();
/** The transaction id of the most recent transfer request (or null if there never was one). */
@Embedded
@AttributeOverrides({
@AttributeOverride(
name = "serverTransactionId",
column = @Column(name = "transfer_server_txn_id")),
@AttributeOverride(
name = "clientTransactionId",
column = @Column(name = "transfer_client_txn_id"))
})
Trid transferRequestTrid;
@Column(name = "transfer_repo_id")
String repoId;
@Column(name = "transfer_history_entry_id")
Long historyEntryId;
// The pollMessageId1 and pollMessageId2 are used to store the IDs for gaining and losing poll
// messages in Cloud SQL.
//
// In addition, there may be a third poll message for the autorenew poll message on domain
// transfer if applicable.
@Column(name = "transfer_poll_message_id_1")
Long pollMessageId1;
@Column(name = "transfer_poll_message_id_2")
Long pollMessageId2;
@Column(name = "transfer_poll_message_id_3")
Long pollMessageId3;
/**
* The period to extend the registration upon completion of the transfer.
*
@@ -107,15 +145,19 @@ public class DomainTransferData extends TransferData {
@Column(name = "transfer_autorenew_poll_message_history_id")
Long serverApproveAutorenewPollMessageHistoryId;
@Override
public Builder copyConstantFieldsToBuilder() {
return ((Builder) super.copyConstantFieldsToBuilder()).setTransferPeriod(transferPeriod);
}
public Period getTransferPeriod() {
return transferPeriod;
}
public Long getHistoryEntryId() {
return historyEntryId;
}
@Nullable
public Trid getTransferRequestTrid() {
return transferRequestTrid;
}
@Nullable
public DateTime getTransferredRegistrationExpirationTime() {
return transferredRegistrationExpirationTime;
@@ -141,12 +183,13 @@ public class DomainTransferData extends TransferData {
return serverApproveAutorenewPollMessageHistoryId;
}
@Override
public ImmutableSet<VKey<? extends TransferServerApproveEntity>> getServerApproveEntities() {
ImmutableSet.Builder<VKey<? extends TransferServerApproveEntity>> builder =
new ImmutableSet.Builder<>();
builder.addAll(super.getServerApproveEntities());
return NullIgnoringCollectionBuilder.create(builder)
.add(pollMessageId1 != null ? VKey.create(PollMessage.class, pollMessageId1) : null)
.add(pollMessageId2 != null ? VKey.create(PollMessage.class, pollMessageId2) : null)
.add(pollMessageId3 != null ? VKey.create(PollMessage.class, pollMessageId3) : null)
.add(serverApproveBillingEvent)
.add(serverApproveAutorenewEvent)
.add(billingCancellationId)
@@ -154,16 +197,10 @@ public class DomainTransferData extends TransferData {
.build();
}
@Override
public boolean isEmpty() {
return EMPTY.equals(this);
}
@Override
protected Builder createEmptyBuilder() {
return new Builder();
}
@Override
public Builder asBuilder() {
return new Builder(clone(this));
@@ -186,7 +223,72 @@ public class DomainTransferData extends TransferData {
}
}
public static class Builder extends TransferData.Builder<DomainTransferData, Builder> {
/**
* Returns a fresh Builder populated only with the constant fields of this TransferData, i.e.
* those that are fixed and unchanging throughout the transfer process.
*
* <p>These fields are:
*
* <ul>
* <li>transferRequestTrid
* <li>transferRequestTime
* <li>gainingClientId
* <li>losingClientId
* <li>transferPeriod
* </ul>
*/
public Builder copyConstantFieldsToBuilder() {
return new Builder()
.setTransferPeriod(transferPeriod)
.setTransferRequestTrid(transferRequestTrid)
.setTransferRequestTime(transferRequestTime)
.setGainingRegistrarId(gainingClientId)
.setLosingRegistrarId(losingClientId);
}
/** Maps serverApproveEntities set to the individual fields. */
static void mapServerApproveEntitiesToFields(
Set<VKey<? extends TransferServerApproveEntity>> serverApproveEntities,
DomainTransferData transferData) {
if (isNullOrEmpty(serverApproveEntities)) {
transferData.pollMessageId1 = null;
transferData.pollMessageId2 = null;
transferData.pollMessageId3 = null;
return;
}
ImmutableList<Long> sortedPollMessageIds = getSortedPollMessageIds(serverApproveEntities);
if (!sortedPollMessageIds.isEmpty()) {
transferData.pollMessageId1 = sortedPollMessageIds.get(0);
}
if (sortedPollMessageIds.size() >= 2) {
transferData.pollMessageId2 = sortedPollMessageIds.get(1);
}
if (sortedPollMessageIds.size() >= 3) {
transferData.pollMessageId3 = sortedPollMessageIds.get(2);
}
}
/**
* Gets poll message IDs from the given serverApproveEntities and sorts the IDs in natural order.
*/
private static ImmutableList<Long> getSortedPollMessageIds(
Set<VKey<? extends TransferServerApproveEntity>> serverApproveEntities) {
return nullToEmpty(serverApproveEntities).stream()
.filter(vKey -> PollMessage.class.isAssignableFrom(vKey.getKind()))
.map(vKey -> (long) vKey.getKey())
.sorted()
.collect(toImmutableList());
}
/**
* Marker interface for objects that are written in anticipation of a server approval, and
* therefore need to be deleted under any other outcome.
*/
public interface TransferServerApproveEntity {
VKey<? extends TransferServerApproveEntity> createVKey();
}
public static class Builder extends BaseTransferObject.Builder<DomainTransferData, Builder> {
/** Create a {@link Builder} wrapping a new instance. */
public Builder() {}
@@ -195,6 +297,20 @@ public class DomainTransferData extends TransferData {
super(instance);
}
@Override
public DomainTransferData build() {
if (getInstance().pollMessageId1 != null) {
checkState(getInstance().repoId != null, "Repo id undefined");
checkState(getInstance().historyEntryId != null, "History entry undefined");
}
return super.build();
}
public Builder setTransferRequestTrid(Trid transferRequestTrid) {
getInstance().transferRequestTrid = transferRequestTrid;
return this;
}
public Builder setTransferPeriod(Period transferPeriod) {
getInstance().transferPeriod = transferPeriod;
return this;
@@ -223,12 +339,13 @@ public class DomainTransferData extends TransferData {
return this;
}
@Override
public Builder setServerApproveEntities(
String repoId,
Long historyId,
ImmutableSet<VKey<? extends TransferServerApproveEntity>> serverApproveEntities) {
super.setServerApproveEntities(repoId, historyId, serverApproveEntities);
getInstance().repoId = repoId;
getInstance().historyEntryId = historyId;
mapServerApproveEntitiesToFields(serverApproveEntities, getInstance());
mapBillingCancellationEntityToField(serverApproveEntities, getInstance());
return this;
}

View File

@@ -1,205 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.model.transfer;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static google.registry.util.CollectionUtils.isNullOrEmpty;
import static google.registry.util.CollectionUtils.nullToEmpty;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import google.registry.model.Buildable;
import google.registry.model.EppResource;
import google.registry.model.eppcommon.Trid;
import google.registry.model.poll.PollMessage;
import google.registry.persistence.VKey;
import google.registry.util.NullIgnoringCollectionBuilder;
import jakarta.persistence.AttributeOverride;
import jakarta.persistence.AttributeOverrides;
import jakarta.persistence.Column;
import jakarta.persistence.Embedded;
import jakarta.persistence.MappedSuperclass;
import java.util.Set;
import javax.annotation.Nullable;
/**
* Common transfer data for {@link EppResource} types. Only applies to domains and contacts; hosts
* are implicitly transferred with their superordinate domain.
*/
@MappedSuperclass
public abstract class TransferData extends BaseTransferObject implements Buildable {
/** The transaction id of the most recent transfer request (or null if there never was one). */
@Embedded
@AttributeOverrides({
@AttributeOverride(
name = "serverTransactionId",
column = @Column(name = "transfer_server_txn_id")),
@AttributeOverride(
name = "clientTransactionId",
column = @Column(name = "transfer_client_txn_id"))
})
Trid transferRequestTrid;
@Column(name = "transfer_repo_id")
String repoId;
@Column(name = "transfer_history_entry_id")
Long historyEntryId;
// The pollMessageId1 and pollMessageId2 are used to store the IDs for gaining and losing poll
// messages in Cloud SQL.
//
// In addition, there may be a third poll message for the autorenew poll message on domain
// transfer if applicable.
@Column(name = "transfer_poll_message_id_1")
Long pollMessageId1;
@Column(name = "transfer_poll_message_id_2")
Long pollMessageId2;
@Column(name = "transfer_poll_message_id_3")
Long pollMessageId3;
public abstract boolean isEmpty();
public Long getHistoryEntryId() {
return historyEntryId;
}
@Nullable
public Trid getTransferRequestTrid() {
return transferRequestTrid;
}
public ImmutableSet<VKey<? extends TransferServerApproveEntity>> getServerApproveEntities() {
return NullIgnoringCollectionBuilder.create(
new ImmutableSet.Builder<VKey<? extends TransferServerApproveEntity>>())
.add(pollMessageId1 != null ? VKey.create(PollMessage.class, pollMessageId1) : null)
.add(pollMessageId2 != null ? VKey.create(PollMessage.class, pollMessageId2) : null)
.add(pollMessageId3 != null ? VKey.create(PollMessage.class, pollMessageId3) : null)
.getBuilder()
.build();
}
@Override
public abstract Builder<?, ?> asBuilder();
protected abstract Builder<?, ?> createEmptyBuilder();
/**
* Returns a fresh Builder populated only with the constant fields of this TransferData, i.e.
* those that are fixed and unchanging throughout the transfer process.
*
* <p>These fields are:
*
* <ul>
* <li>transferRequestTrid
* <li>transferRequestTime
* <li>gainingClientId
* <li>losingClientId
* <li>transferPeriod
* </ul>
*/
public Builder<?, ?> copyConstantFieldsToBuilder() {
Builder<?, ?> newBuilder = createEmptyBuilder();
newBuilder
.setTransferRequestTrid(transferRequestTrid)
.setTransferRequestTime(transferRequestTime)
.setGainingRegistrarId(gainingClientId)
.setLosingRegistrarId(losingClientId);
return newBuilder;
}
/** Maps serverApproveEntities set to the individual fields. */
static void mapServerApproveEntitiesToFields(
Set<VKey<? extends TransferServerApproveEntity>> serverApproveEntities,
TransferData transferData) {
if (isNullOrEmpty(serverApproveEntities)) {
transferData.pollMessageId1 = null;
transferData.pollMessageId2 = null;
transferData.pollMessageId3 = null;
return;
}
ImmutableList<Long> sortedPollMessageIds = getSortedPollMessageIds(serverApproveEntities);
if (sortedPollMessageIds.size() >= 1) {
transferData.pollMessageId1 = sortedPollMessageIds.get(0);
}
if (sortedPollMessageIds.size() >= 2) {
transferData.pollMessageId2 = sortedPollMessageIds.get(1);
}
if (sortedPollMessageIds.size() >= 3) {
transferData.pollMessageId3 = sortedPollMessageIds.get(2);
}
}
/**
* Gets poll message IDs from the given serverApproveEntities and sorted the IDs in natural order.
*/
private static ImmutableList<Long> getSortedPollMessageIds(
Set<VKey<? extends TransferServerApproveEntity>> serverApproveEntities) {
return nullToEmpty(serverApproveEntities).stream()
.filter(vKey -> PollMessage.class.isAssignableFrom(vKey.getKind()))
.map(vKey -> (long) vKey.getKey())
.sorted()
.collect(toImmutableList());
}
/** Builder for {@link TransferData} because it is immutable. */
public abstract static class Builder<T extends TransferData, B extends Builder<T, B>>
    extends BaseTransferObject.Builder<T, B> {

  /** Create a {@link Builder} wrapping a new instance. */
  protected Builder() {}

  /** Create a {@link Builder} wrapping the given instance. */
  protected Builder(T instance) {
    super(instance);
  }

  /** Sets the TRID of the EPP command that requested this transfer. */
  public B setTransferRequestTrid(Trid transferRequestTrid) {
    getInstance().transferRequestTrid = transferRequestTrid;
    return thisCastToDerived();
  }

  /**
   * Sets the server-approve entities, recording the owning repo ID and history entry ID and
   * distributing the entities' poll message IDs across the individual ID fields.
   */
  public B setServerApproveEntities(
      String repoId,
      Long historyId,
      ImmutableSet<VKey<? extends TransferServerApproveEntity>> serverApproveEntities) {
    T instance = getInstance();
    instance.repoId = repoId;
    instance.historyEntryId = historyId;
    mapServerApproveEntitiesToFields(serverApproveEntities, instance);
    return thisCastToDerived();
  }

  @Override
  public T build() {
    T instance = getInstance();
    // A populated poll message slot implies server-approve entities were set, which in turn
    // requires the repo ID and history entry ID to have been recorded alongside them.
    if (instance.pollMessageId1 != null) {
      checkState(instance.repoId != null, "Repo id undefined");
      checkState(instance.historyEntryId != null, "History entry undefined");
    }
    return super.build();
  }
}
/**
 * Marker interface for objects that are written in anticipation of a server approval, and
 * therefore need to be deleted under any other outcome.
 */
public interface TransferServerApproveEntity {
  /** Returns the {@link VKey} identifying this entity. */
  VKey<? extends TransferServerApproveEntity> createVKey();
}
}

View File

@@ -23,7 +23,6 @@ import google.registry.model.domain.secdns.DomainDsData;
import google.registry.model.eppcommon.StatusValue;
import google.registry.model.rde.RdeMode;
import google.registry.model.transfer.DomainTransferData;
import google.registry.model.transfer.TransferData;
import google.registry.util.Idn;
import google.registry.xjc.domain.XjcDomainNsType;
import google.registry.xjc.domain.XjcDomainStatusType;
@@ -221,7 +220,7 @@ final class DomainToXjcConverter {
&& !Strings.isNullOrEmpty(model.getTransferData().getLosingRegistrarId());
}
/** Converts {@link TransferData} to {@link XjcRdeDomainTransferDataType}. */
/** Converts {@link DomainTransferData} to {@link XjcRdeDomainTransferDataType}. */
private static XjcRdeDomainTransferDataType convertTransferData(DomainTransferData model) {
XjcRdeDomainTransferDataType bean = new XjcRdeDomainTransferDataType();
bean.setTrStatus(

View File

@@ -65,7 +65,7 @@ public class BulkDomainTransferCommand extends ConfirmingCommand implements Comm
@Parameter(
names = {"-d", "--domain_names_file"},
description = "A file with a list of newline-delimited domain names to create tokens for")
description = "A file with a list of newline-delimited domain names to transfer")
private String domainNamesFile;
@Parameter(
@@ -82,7 +82,7 @@ public class BulkDomainTransferCommand extends ConfirmingCommand implements Comm
@Parameter(
names = {"--reason"},
description = "Reason to transfer the domains",
description = "Reason to transfer the domains, possibly a bug number",
required = true)
private String reason;

View File

@@ -61,10 +61,6 @@ public class PasswordResetRequestAction extends ConsoleApiAction {
@Override
protected void postHandler(User user) {
// Temporary flag when testing email sending etc
if (!user.getUserRoles().isAdmin()) {
setFailedResponse("", HttpServletResponse.SC_FORBIDDEN);
}
tm().transact(() -> performRequest(user));
consoleApiParams.response().setStatus(HttpServletResponse.SC_OK);
}

View File

@@ -23,6 +23,7 @@ import static google.registry.ui.server.console.PasswordResetRequestAction.check
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import google.registry.model.console.ConsolePermission;
import google.registry.model.console.ConsoleUpdateHistory;
import google.registry.model.console.PasswordResetRequest;
import google.registry.model.console.User;
import google.registry.model.registrar.Registrar;
@@ -59,11 +60,6 @@ public class PasswordResetVerifyAction extends ConsoleApiAction {
@Override
protected void getHandler(User user) {
// Temporary flag when testing email sending etc
if (!user.getUserRoles().isAdmin()) {
setFailedResponse("", HttpServletResponse.SC_FORBIDDEN);
return;
}
PasswordResetRequest request = tm().transact(() -> loadAndValidateResetRequest(user));
ImmutableMap<String, ?> result =
ImmutableMap.of("type", request.getType(), "registrarId", request.getRegistrarId());
@@ -73,11 +69,6 @@ public class PasswordResetVerifyAction extends ConsoleApiAction {
@Override
protected void postHandler(User user) {
// Temporary flag when testing email sending etc
if (!user.getUserRoles().isAdmin()) {
setFailedResponse("", HttpServletResponse.SC_FORBIDDEN);
return;
}
checkArgument(!Strings.isNullOrEmpty(newPassword.orElse(null)), "Password must be provided");
tm().transact(
() -> {
@@ -87,6 +78,16 @@ public class PasswordResetVerifyAction extends ConsoleApiAction {
case REGISTRY_LOCK -> handleRegistryLockPasswordReset(request);
}
tm().put(request.asBuilder().setFulfillmentTime(tm().getTransactionTime()).build());
finishAndPersistConsoleUpdateHistory(
new ConsoleUpdateHistory.Builder()
.setType(ConsoleUpdateHistory.Type.EPP_PASSWORD_UPDATE)
.setDescription(
String.format(
"%s%s%s",
request.getRegistrarId(),
ConsoleUpdateHistory.DESCRIPTION_SEPARATOR,
"Password reset fulfilled via verification code")));
});
consoleApiParams.response().setStatus(HttpServletResponse.SC_OK);
}
@@ -110,6 +111,11 @@ public class PasswordResetVerifyAction extends ConsoleApiAction {
PasswordResetRequest request =
tm().loadByKeyIfPresent(VKey.create(PasswordResetRequest.class, verificationCode))
.orElseThrow(this::createVerificationCodeException);
if (request.getFulfillmentTime().isPresent()) {
throw new IllegalArgumentException("This reset request has already been used.");
}
ConsolePermission requiredVerifyPermission =
switch (request.getType()) {
case EPP -> ConsolePermission.MANAGE_USERS;

View File

@@ -235,10 +235,7 @@ public class RdePipelineTest {
persistHostHistory(host1);
Domain helloDomain =
persistEppResource(
newDomain("hello.soy")
.asBuilder()
.addNameserver(host1.createVKey())
.build());
newDomain("hello.soy").asBuilder().addNameserver(host1.createVKey()).build());
persistDomainHistory(helloDomain);
persistHostHistory(persistActiveHost("not-used-subordinate.hello.soy"));
Host host2 = persistActiveHost("ns1.hello.soy");

View File

@@ -41,7 +41,7 @@ import google.registry.model.eppcommon.StatusValue;
import google.registry.model.host.Host;
import google.registry.model.reporting.HistoryEntry;
import google.registry.model.tld.Tld;
import google.registry.model.transfer.TransferData;
import google.registry.model.transfer.DomainTransferData;
import google.registry.model.transfer.TransferStatus;
import google.registry.persistence.transaction.JpaTransactionManagerExtension;
import org.joda.time.DateTime;
@@ -161,7 +161,8 @@ abstract class DomainTransferFlowTestCase<F extends Flow, R extends EppResource>
.build();
}
void assertTransferFailed(Domain domain, TransferStatus status, TransferData oldTransferData) {
void assertTransferFailed(
Domain domain, TransferStatus status, DomainTransferData oldTransferData) {
assertAboutDomains()
.that(domain)
.doesNotHaveStatusValue(StatusValue.PENDING_TRANSFER)

View File

@@ -54,7 +54,7 @@ import google.registry.model.poll.PollMessage;
import google.registry.model.reporting.DomainTransactionRecord;
import google.registry.model.reporting.HistoryEntry;
import google.registry.model.tld.Tld;
import google.registry.model.transfer.TransferData;
import google.registry.model.transfer.DomainTransferData;
import google.registry.model.transfer.TransferResponse;
import google.registry.model.transfer.TransferStatus;
import org.joda.time.DateTime;
@@ -90,7 +90,7 @@ class DomainTransferRejectFlowTest
assertMutatingFlow(true);
DateTime originalExpirationTime = domain.getRegistrationExpirationTime();
ImmutableSet<GracePeriod> originalGracePeriods = domain.getGracePeriods();
TransferData originalTransferData = domain.getTransferData();
DomainTransferData originalTransferData = domain.getTransferData();
runFlowAssertResponse(loadFile(expectedXmlFilename));
// Transfer should have been rejected. Verify correct fields were set.
domain = reloadResourceByForeignKey();

View File

@@ -29,8 +29,8 @@ import org.joda.time.DateTime;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/** Unit tests for {@link TransferData}. */
public class TransferDataTest {
/** Unit tests for {@link DomainTransferData}. */
public class DomainTransferDataTest {
private final DateTime now = DateTime.now(UTC);

View File

@@ -34,7 +34,6 @@ import java.util.Optional;
import javax.annotation.Nullable;
import org.joda.time.Duration;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
/** Tests for {@link PasswordResetVerifyAction}. */
@@ -111,28 +110,24 @@ public class PasswordResetVerifyActionTest extends ConsoleActionBaseTestCase {
}
@Test
@Disabled("Enable when testing is done in sandbox and isAdmin check is removed")
void testFailure_get_epp_badPermission() throws Exception {
createAction(createTechUser(), "GET", verificationCode, null).run();
assertThat(response.getStatus()).isEqualTo(HttpServletResponse.SC_FORBIDDEN);
}
@Test
@Disabled("Enable when testing is done in sandbox and isAdmin check is removed")
void testFailure_get_lock_badPermission() throws Exception {
createAction(createAccountManager(), "GET", verificationCode, null).run();
assertThat(response.getStatus()).isEqualTo(HttpServletResponse.SC_FORBIDDEN);
}
@Test
@Disabled("Enable when testing is done in sandbox and isAdmin check is removed")
void testFailure_post_epp_badPermission() throws Exception {
createAction(createTechUser(), "POST", verificationCode, "newPassword").run();
assertThat(response.getStatus()).isEqualTo(HttpServletResponse.SC_FORBIDDEN);
}
@Test
@Disabled("Enable when testing is done in sandbox and isAdmin check is removed")
void testFailure_post_lock_badPermission() throws Exception {
createAction(createAccountManager(), "POST", verificationCode, "newPassword").run();
assertThat(response.getStatus()).isEqualTo(HttpServletResponse.SC_FORBIDDEN);

Binary file not shown.

Before

Width:  |  Height:  |  Size: 35 KiB

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 52 KiB

After

Width:  |  Height:  |  Size: 52 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 38 KiB

After

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 93 KiB

After

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 35 KiB

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 95 KiB

After

Width:  |  Height:  |  Size: 96 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 38 KiB

After

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 48 KiB

After

Width:  |  Height:  |  Size: 48 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 72 KiB

After

Width:  |  Height:  |  Size: 72 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 45 KiB

After

Width:  |  Height:  |  Size: 45 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 45 KiB

After

Width:  |  Height:  |  Size: 45 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 74 KiB

After

Width:  |  Height:  |  Size: 74 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 38 KiB

After

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 57 KiB

After

Width:  |  Height:  |  Size: 56 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 62 KiB

After

Width:  |  Height:  |  Size: 62 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 76 KiB

After

Width:  |  Height:  |  Size: 76 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 76 KiB

After

Width:  |  Height:  |  Size: 76 KiB

View File

@@ -261,7 +261,7 @@ td.section {
</tr>
<tr>
<td class="property_name">generated on</td>
<td class="property_value">2026-02-20 05:51:31</td>
<td class="property_value">2026-02-20 20:45:48</td>
</tr>
<tr>
<td class="property_name">last flyway file</td>
@@ -273,7 +273,7 @@ td.section {
<p>&nbsp;</p>
<svg viewBox="0.00 0.00 4846.00 3765.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" id="erDiagram" style="overflow: hidden; width: 100%; height: 800px">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 3761)">
<title>SchemaCrawler_Diagram</title> <polygon fill="white" stroke="transparent" points="-4,4 -4,-3761 4842,-3761 4842,4 -4,4" /> <text text-anchor="start" x="4598" y="-29.8" font-family="Helvetica,sans-Serif" font-size="14.00">generated by</text> <text text-anchor="start" x="4681" y="-29.8" font-family="Helvetica,sans-Serif" font-size="14.00">SchemaCrawler 16.27.1</text> <text text-anchor="start" x="4597" y="-10.8" font-family="Helvetica,sans-Serif" font-size="14.00">generated on</text> <text text-anchor="start" x="4681" y="-10.8" font-family="Helvetica,sans-Serif" font-size="14.00">2026-02-20 05:51:31</text> <polygon fill="none" stroke="#888888" points="4594,-4 4594,-44 4830,-44 4830,-4 4594,-4" /> <!-- allocationtoken_a08ccbef -->
<title>SchemaCrawler_Diagram</title> <polygon fill="white" stroke="transparent" points="-4,4 -4,-3761 4842,-3761 4842,4 -4,4" /> <text text-anchor="start" x="4598" y="-29.8" font-family="Helvetica,sans-Serif" font-size="14.00">generated by</text> <text text-anchor="start" x="4681" y="-29.8" font-family="Helvetica,sans-Serif" font-size="14.00">SchemaCrawler 16.27.1</text> <text text-anchor="start" x="4597" y="-10.8" font-family="Helvetica,sans-Serif" font-size="14.00">generated on</text> <text text-anchor="start" x="4681" y="-10.8" font-family="Helvetica,sans-Serif" font-size="14.00">2026-02-20 20:45:48</text> <polygon fill="none" stroke="#888888" points="4594,-4 4594,-44 4830,-44 4830,-4 4594,-4" /> <!-- allocationtoken_a08ccbef -->
<g id="node1" class="node">
<title>allocationtoken_a08ccbef</title> <polygon fill="#e9c2f2" stroke="transparent" points="525.5,-1272 525.5,-1291 711.5,-1291 711.5,-1272 525.5,-1272" /> <text text-anchor="start" x="527.5" y="-1278.8" font-family="Helvetica,sans-Serif" font-weight="bold" font-style="italic" font-size="14.00">public."AllocationToken"</text> <polygon fill="#e9c2f2" stroke="transparent" points="711.5,-1272 711.5,-1291 785.5,-1291 785.5,-1272 711.5,-1272" /> <text text-anchor="start" x="746.5" y="-1277.8" font-family="Helvetica,sans-Serif" font-size="14.00">[table]</text> <text text-anchor="start" x="527.5" y="-1259.8" font-family="Helvetica,sans-Serif" font-weight="bold" font-style="italic" font-size="14.00">token</text> <text text-anchor="start" x="705.5" y="-1258.8" font-family="Helvetica,sans-Serif" font-size="14.00"> </text> <text text-anchor="start" x="713.5" y="-1258.8" font-family="Helvetica,sans-Serif" font-size="14.00">text not null</text> <text text-anchor="start" x="527.5" y="-1239.8" font-family="Helvetica,sans-Serif" font-size="14.00">domain_name</text> <text text-anchor="start" x="705.5" y="-1239.8" font-family="Helvetica,sans-Serif" font-size="14.00"> </text> <text text-anchor="start" x="713.5" y="-1239.8" font-family="Helvetica,sans-Serif" font-size="14.00">text</text> <text text-anchor="start" x="527.5" y="-1220.8" font-family="Helvetica,sans-Serif" font-size="14.00">redemption_domain_repo_id</text> <text text-anchor="start" x="705.5" y="-1220.8" font-family="Helvetica,sans-Serif" font-size="14.00"> </text> <text text-anchor="start" x="713.5" y="-1220.8" font-family="Helvetica,sans-Serif" font-size="14.00">text</text> <text text-anchor="start" x="527.5" y="-1201.8" font-family="Helvetica,sans-Serif" font-size="14.00">token_type</text> <text text-anchor="start" x="705.5" y="-1201.8" font-family="Helvetica,sans-Serif" font-size="14.00"> </text> <text text-anchor="start" x="713.5" y="-1201.8" font-family="Helvetica,sans-Serif" font-size="14.00">text</text> <polygon 
fill="none" stroke="#888888" points="524.5,-1195.5 524.5,-1292.5 786.5,-1292.5 786.5,-1195.5 524.5,-1195.5" />
</g>

File diff suppressed because one or more lines are too long

View File

@@ -206,20 +206,20 @@
tech_contact text,
tld text,
transfer_billing_cancellation_id bigint,
transfer_history_entry_id bigint,
transfer_poll_message_id_1 bigint,
transfer_poll_message_id_2 bigint,
transfer_poll_message_id_3 bigint,
transfer_repo_id text,
transfer_billing_recurrence_id bigint,
transfer_autorenew_poll_message_id bigint,
transfer_autorenew_poll_message_history_id bigint,
transfer_billing_event_id bigint,
transfer_renew_period_unit text check (transfer_renew_period_unit in ('YEARS','MONTHS')),
transfer_renew_period_value integer,
transfer_registration_expiration_time timestamp(6) with time zone,
transfer_history_entry_id bigint,
transfer_poll_message_id_1 bigint,
transfer_poll_message_id_2 bigint,
transfer_poll_message_id_3 bigint,
transfer_repo_id text,
transfer_client_txn_id text,
transfer_server_txn_id text,
transfer_registration_expiration_time timestamp(6) with time zone,
transfer_gaining_registrar_id text,
transfer_losing_registrar_id text,
transfer_pending_expiration_time timestamp(6) with time zone,
@@ -278,20 +278,20 @@
tech_contact text,
tld text,
transfer_billing_cancellation_id bigint,
transfer_history_entry_id bigint,
transfer_poll_message_id_1 bigint,
transfer_poll_message_id_2 bigint,
transfer_poll_message_id_3 bigint,
transfer_repo_id text,
transfer_billing_recurrence_id bigint,
transfer_autorenew_poll_message_id bigint,
transfer_autorenew_poll_message_history_id bigint,
transfer_billing_event_id bigint,
transfer_renew_period_unit text check (transfer_renew_period_unit in ('YEARS','MONTHS')),
transfer_renew_period_value integer,
transfer_registration_expiration_time timestamp(6) with time zone,
transfer_history_entry_id bigint,
transfer_poll_message_id_1 bigint,
transfer_poll_message_id_2 bigint,
transfer_poll_message_id_3 bigint,
transfer_repo_id text,
transfer_client_txn_id text,
transfer_server_txn_id text,
transfer_registration_expiration_time timestamp(6) with time zone,
transfer_gaining_registrar_id text,
transfer_losing_registrar_id text,
transfer_pending_expiration_time timestamp(6) with time zone,

View File

@@ -1,13 +1,12 @@
# Admin tool
Nomulus includes a command-line registry administration tool that is invoked
using the `nomulus` command. It has the ability to view and change a large
number of things in a live Nomulus environment, including creating registrars,
updating premium and reserved lists, running an EPP command from a given XML
file, and performing various backend tasks like re-running RDE if the most
Nomulus includes a command-line registry administration tool. It has the ability
to view and change a large number of things in a live Nomulus environment,
including creating registrars, running arbitrary EPP commands from given XML
files, and performing various backend tasks like re-running RDE if the most
recent export failed. Its code lives inside the tools package
(`java/google/registry/tools`), and is compiled by building the `nomulus` target
in the Bazel BUILD file in that package.
(`core/src/main/java/google/registry/tools`), and is compiled by building the
`nomulus` Gradle target in the `core` project, e.g. `./gradlew core:nomulus`.
The tool connects to the Google Cloud Platform project (identified by project
ID) that was configured in your implementation of `RegistryConfig` when the tool
@@ -21,18 +20,25 @@ ID is also "acme-registry", and the project ID for the sandbox environment is
## Build the tool
To build the `nomulus` tool, execute the following `bazel build` command inside
any directory of the codebase. You must rebuild the tool any time that you edit
configuration or make database schema changes.
To build the `nomulus` tool's jarfile, execute the following Gradle command
inside the project's home directory: `./gradlew core:nomulus`. You must rebuild
the tool any time that you edit configuration or make database schema changes.
Note that proper project configuration is necessary for building the tool --
this includes the specialized configuration such as GCP project names.
It's recommended that you alias the compiled jarfile located at
`core/build/libs/nomulus.jar` (or add it to your shell path) so that you can run
it easily, e.g.
```shell
$ bazel build //java/google/registry/tools:nomulus
$ alias nomulus="java -jar core/build/libs/nomulus.jar"
```
It's recommended that you alias the compiled binary located at
`bazel-genfiles/java/google/registry/nomulus` (or add it to your shell path) so
that you can run it easily. The rest of this guide assumes that it has been
aliased to `nomulus`.
The rest of this guide assumes that it has been aliased to `nomulus`.
Note: for Google Registry employees, the nomulus tool is built as part of the
weekly deployment process and the nomulus jarfile is located at
`/google/data/ro/teams/domain-registry/tools/live/nomulus.jar`
## Running the tool
@@ -56,33 +62,27 @@ metadata contained within the code to yield documentation.
## Local and server-side commands
There are two broad ways that commands are implemented: some that send requests
to `ToolsServlet` to execute the action on the server (these commands implement
`ServerSideCommand`), and others that execute the command locally using the
[Remote API](https://cloud.google.com/appengine/docs/java/tools/remoteapi)
(these commands implement `RemoteApiCommand`). Server-side commands take more
work to implement because they require both a client and a server-side
component.
However, they are fully capable of doing anything that is possible with App
Engine, including running a large MapReduce, because they execute on the tools
service in the App Engine cloud.
There are two broad ways that commands are implemented: some send requests to
the backend server to execute the action on the server (these commands implement
`CommandWithConnection`), and others that execute the command locally using
access to the database. Commands that send requests to the backend server are
more work to implement because they require both a client-side and server-side
component, but they are more powerful -- even running Flow pipelines or other
long-running intensive jobs.
Local commands, by contrast, are easier to implement, because there is only a
local component to write, but they aren't as powerful. A general rule of thumb
for making this determination is to use a local command if possible, or a
server-side command otherwise.
Local commands are easier to implement (because there is only a local component
to write) but they aren't as powerful. As a rule of thumb, use a local command
if possible.
## Common tool patterns
All tools ultimately implement the `Command` interface located in the `tools`
package. If you use an integrated development environment (IDE) such as IntelliJ
to view the type hierarchy of that interface, you'll see all of the commands
that exist, as well as how a lot of them are grouped using sub-interfaces or
abstract classes that provide additional functionality. The most common patterns
that are used by a large number of other tools are:
to view the type hierarchy of that interface, you'll see all the commands that
exist, as well as how a lot of them are grouped using sub-interfaces or abstract
classes that provide additional functionality. The most common patterns that are
used by a large number of other tools are:
* **`BigqueryCommand`** -- Provides a connection to BigQuery for tools that
need it.
* **`ConfirmingCommand`** -- Provides the methods `prompt()` and `execute()`
to override. `prompt()` outputs a message (usually what the command is going
to do) and prompts the user to confirm execution of the command, and then
@@ -90,10 +90,9 @@ that are used by a large number of other tools are:
* **`EppToolCommand`** -- Commands that work by executing EPP commands against
the server, usually by filling in a template with parameters that were
passed on the command-line.
* **`MutatingEppToolCommand`** -- A sub-class of `EppToolCommand` that
provides a `--dry_run` flag, that, if passed, will display the output from
the server of what the command would've done without actually committing
those changes.
* **`MutatingEppToolCommand`** -- A subclass of `EppToolCommand` that provides
a `--dry_run` flag, that, if passed, will display the output from the server
of what the command would've done without actually committing those changes.
* **`GetEppResourceCommand`** -- Gets individual EPP resources from the server
and outputs them.
* **`ListObjectsCommand`** -- Lists all objects of a specific type from the

View File

@@ -1,153 +1,97 @@
# Architecture
This document contains information on the overall architecture of Nomulus on
[Google Cloud Platform](https://cloud.google.com/). It covers the App Engine
architecture as well as other Cloud Platform services used by Nomulus.
[Google Cloud Platform](https://cloud.google.com/).
## App Engine
Nomulus was originally built for App Engine, but the modern architecture now
uses Google Kubernetes Engine (GKE) for better flexibility and control over
networking, running as a series of Java-based microservices within GKE pods.
[Google App Engine](https://cloud.google.com/appengine/) is a cloud computing
platform that runs web applications in the form of servlets. Nomulus consists of
Java servlets that process web requests. These servlets use other features
provided by App Engine, including task queues and cron jobs, as explained
below.
In addition, because GKE (and standard HTTP load balancers) typically handle
HTTP(S) traffic, Nomulus uses a custom proxy to handle the raw TCP traffic required
for EPP (port 700). This proxy can run as a GKE sidecar or a standalone cluster.
For more information on the proxy, see [the proxy setup guide](proxy-setup.md).
### Services
### Workloads
Nomulus contains three [App Engine
services](https://cloud.google.com/appengine/docs/python/an-overview-of-app-engine),
which were previously called modules in earlier versions of App Engine. The
services are: default (also called front-end), backend, and tools. Each service
runs independently in a lot of ways, including that they can be upgraded
individually, their log outputs are separate, and their servers and configured
scaling are separate as well.
Nomulus contains four Kubernetes
[workloads](https://kubernetes.io/docs/concepts/workloads/). Each workload is
fairly independent as one would expect, including scaling.
Once you have your app deployed and running, the default service can be accessed
at `https://project-id.appspot.com`, substituting whatever your App Engine app
is named for "project-id". Note that that is the URL for the production instance
of your app; other environments will have the environment name appended with a
hyphen in the hostname, e.g. `https://project-id-sandbox.appspot.com`.
The four workloads are referred to as `frontend`, `backend`, `console`, and
`pubapi`.
The URL for the backend service is `https://backend-dot-project-id.appspot.com`
and the URL for the tools service is `https://tools-dot-project-id.appspot.com`.
The reason that the dot is escaped rather than forming subdomains is because the
SSL certificate for `appspot.com` is only valid for `*.appspot.com` (no double
wild-cards).
Each workload's URL is created by prefixing the name of the workload to the base
domain, e.g. `https://pubapi.mydomain.example`. Requests to each workload are
all handled by the
[RegistryServlet](https://github.com/google/nomulus/blob/master/core/src/main/java/google/registry/module/RegistryServlet.java).
#### Default service
#### Frontend workload
The default service is responsible for all registrar-facing
The frontend workload is responsible for all registrar-facing
[EPP](https://en.wikipedia.org/wiki/Extensible_Provisioning_Protocol) command
traffic, all user-facing WHOIS and RDAP traffic, and the admin and registrar web
consoles, and is thus the most important service. If the service has any
problems and goes down or stops servicing requests in a timely manner, it will
begin to impact users immediately. Requests to the default service are handled
by the `FrontendServlet`, which provides all of the endpoints exposed in
`FrontendRequestComponent`.
traffic. If the workload has any problems or goes down, it will begin to impact
users immediately.
#### Backend service
#### PubApi workload
The backend service is responsible for executing all regularly scheduled
background tasks (using cron) as well as all asynchronous tasks. Requests to the
backend service are handled by the `BackendServlet`, which provides all of the
endpoints exposed in `BackendRequestComponent`. These include tasks for
generating/exporting RDE, syncing the trademark list from TMDB, exporting
backups, writing out DNS updates, handling asynchronous contact and host
deletions, writing out commit logs, exporting metrics to BigQuery, and many
more. Issues in the backend service will not immediately be apparent to end
users, but the longer it is down, the more obvious it will become that
user-visible tasks such as DNS and deletion are not being handled in a timely
manner.
The PubApi (Public API) workload is responsible for all public traffic to the
registry. In practice, this primarily consists of RDAP traffic. This is split
into a separate workload so that public users (without authentication) will have
a harder time impacting intra-registry or registrar-registry actions.
The backend service is also where scheduled and automatically invoked MapReduces
run, which includes some of the aforementioned tasks such as RDE and
asynchronous resource deletion. Consequently, the backend service should be
sized to support not just the normal ongoing DNS load but also the load incurred
by MapReduces, both scheduled (such as RDE) and on-demand (asynchronous
contact/host deletion).
#### Backend workload
#### BSA service
The backend workload is responsible for executing all regularly scheduled
background tasks (using cron) as well as all asynchronous tasks. These include
tasks for generating/exporting RDE, syncing the trademark list from TMDB,
exporting backups, writing out DNS updates, syncing BSA data,
generating/exporting ICANN activity data, and many more. Issues in the backend
workload will not immediately be apparent to end users, but the longer it is
down, the more obvious it will become that user-visible tasks such as DNS and
deletion are not being handled in a timely manner.
The bsa service is responsible for business logic behind Nomulus and BSA
functionality. Requests to the backend service are handled by the `BsaServlet`,
which provides all of the endpoints exposed in `BsaRequestComponent`. These
include tasks for downloading, processing and uploading BSA data.
The backend workload is also where scheduled and automatically-invoked BEAM
pipelines run, which includes some of the aforementioned tasks such as RDE.
Consequently, the backend workload should be sized to support not just the
normal ongoing DNS load but also the load incurred by BEAM pipelines, both
scheduled (such as RDE) and on-demand (started by registry employees).
The backend workload also supports handling of manually-performed actions using
the `nomulus` command-line tool, which provides administrative-level
functionality for developers and tech support employees of the registry.
#### Tools service
### Cloud Tasks queues
The tools service is responsible for servicing requests from the `nomulus`
command line tool, which provides administrative-level functionality for
developers and tech support employees of the registry. It is thus the least
critical of the three services. Requests to the tools service are handled by the
`ToolsServlet`, which provides all of the endpoints exposed in
`ToolsRequestComponent`. Some example functionality that this service provides
includes the server-side code to update premium lists, run EPP commands from the
tool, and manually modify contacts/hosts/domains/and other resources. Problems
with the tools service are not visible to users.
The tools service also runs ad-hoc MapReduces, like those invoked via `nomulus`
tool subcommands like `generate_zone_files` and by manually hitting URLs under
https://tools-dot-project-id.appspot.com, like
`/_dr/task/refreshDnsForAllDomains`.
### Task queues
App Engine [task
queues](https://cloud.google.com/appengine/docs/java/taskqueue/) provide an
GCP's [Cloud Tasks](https://docs.cloud.google.com/tasks/docs) provides an
asynchronous way to enqueue tasks and then execute them on some kind of
schedule. There are two types of queues, push queues and pull queues. Tasks in
push queues are always executing up to some throttlable limit. Tasks in pull
queues remain there until the queue is polled by code that is running for some
other reason. Essentially, push queues run their own tasks while pull queues
just enqueue data that is used by something else. Many other parts of App Engine
are implemented using task queues. For example, [App Engine
cron](https://cloud.google.com/appengine/docs/java/config/cron) adds tasks to
push queues at regularly scheduled intervals, and the [MapReduce
framework](https://cloud.google.com/appengine/docs/java/dataprocessing/) adds
tasks for each phase of the MapReduce algorithm.
schedule. Task queues are essential because by nature, GKE architecture does not
support long-running background processes, and so queues are thus the
fundamental building block that allows asynchronous and background execution of
code that is not in response to incoming web requests.
Nomulus uses a particular pattern of paired push/pull queues that is worth
explaining in detail. Push queues are essential because App Engine's
architecture does not support long-running background processes, and so push
queues are thus the fundamental building block that allows asynchronous and
background execution of code that is not in response to incoming web requests.
However, they also have limitations in that they do not allow batch processing
or grouping. That's where the pull queue comes in. Regularly scheduled tasks in
the push queue will, upon execution, poll the corresponding pull queue for a
specified number of tasks and execute them in a batch. This allows the code to
execute in the background while taking advantage of batch processing.
The task queues used by Nomulus are configured in the `cloud-tasks-queue.xml`
file. Note that many push queues have a direct one-to-one correspondence with
entries in `cloud-scheduler-tasks-ENVIRONMENT.xml` because they need to be
fanned-out on a per-TLD or other basis (see the Cron section below for more
explanation). The exact queue that a given cron task will use is passed as the
query string parameter "queue" in the url specification for the cron task.
The task queues used by Nomulus are configured in the `cloud-tasks-queue.xml`
file. Note that many push queues have a direct one-to-one correspondence with
entries in `cloud-scheduler-tasks.xml` because they need to be fanned-out on a
per-TLD or other basis (see the Cron section below for more explanation).
The exact queue that a given cron task will use is passed as the query string
parameter "queue" in the url specification for the cron task.
Here are the task queues in use by the system. All are push queues unless
explicitly marked as otherwise.
Here are the task queues in use by the system:
* `brda` -- Queue for tasks to upload weekly Bulk Registration Data Access
(BRDA) files to a location where they are available to ICANN. The
`RdeStagingReducer` (part of the RDE MapReduce) creates these tasks at the
end of generating an RDE dump.
* `dns-pull` -- A pull queue to enqueue DNS modifications. Cron regularly runs
`ReadDnsQueueAction`, which drains the queue, batches modifications by TLD,
and writes the batches to `dns-publish` to be published to the configured
`DnsWriter` for the TLD.
(BRDA) files to a location where they are available to ICANN. The RDE
pipeline creates these tasks at the end of generating an RDE dump.
* `dns-publish` -- Queue for batches of DNS updates to be pushed to DNS
writers.
* `lordn-claims` and `lordn-sunrise` -- Pull queues for handling LORDN
exports. Tasks are enqueued synchronously during EPP commands depending on
whether the domain name in question has a claims notice ID.
* `dns-refresh` -- Queues for reading and fanning out DNS refresh requests,
using the `DnsRefreshRequest` SQL table as the source of data
* `marksdb` -- Queue for tasks to verify that an upload to NORDN was
successfully received and verified. These tasks are enqueued by
`NordnUploadAction` following an upload and are executed by
`NordnVerifyAction`.
* `nordn` -- Cron queue used for NORDN exporting. Tasks are executed by
`NordnUploadAction`, which pulls LORDN data from the `lordn-claims` and
`lordn-sunrise` pull queues (above).
`NordnUploadAction`
* `rde-report` -- Queue for tasks to upload RDE reports to ICANN following
successful upload of full RDE files to the escrow provider. Tasks are
enqueued by `RdeUploadAction` and executed by `RdeReportAction`.
@@ -157,28 +101,25 @@ explicitly marked as otherwise.
* `retryable-cron-tasks` -- Catch-all cron queue for various cron tasks that
run infrequently, such as exporting reserved terms.
* `sheet` -- Queue for tasks to sync registrar updates to a Google Sheets
spreadsheet. Tasks are enqueued by `RegistrarServlet` when changes are made
to registrar fields and are executed by `SyncRegistrarsSheetAction`.
spreadsheet, done by `SyncRegistrarsSheetAction`.
### Cron jobs
### Scheduled cron jobs
Nomulus uses App Engine [cron
jobs](https://cloud.google.com/appengine/docs/java/config/cron) to run periodic
scheduled actions. These actions run as frequently as once per minute (in the
case of syncing DNS updates) or as infrequently as once per month (in the case
of RDE exports). Cron tasks are specified in `cron.xml` files, with one per
environment. There are more tasks that run in Production than in other
environments because tasks like uploading RDE dumps are only done for the live
system. Cron tasks execute on the `backend` service.
Nomulus uses [Cloud Scheduler](https://docs.cloud.google.com/scheduler/docs) to
run periodic scheduled actions. These actions run as frequently as once per
minute (in the case of syncing DNS updates) or as infrequently as once per month
(in the case of RDE exports). Cron tasks are specified in
`cloud-scheduler-tasks-{ENVIRONMENT}.xml` files, with one per environment. There
are more tasks that run in Production than in other environments because tasks
like uploading RDE dumps are only done for the live system.
Most cron tasks use the `TldFanoutAction` which is accessed via the
`/_dr/cron/fanout` URL path. This action, which is run by the BackendServlet on
the backend service, fans out a given cron task for each TLD that exists in the
registry system, using the queue that is specified in the `cron.xml` entry.
Because some tasks may be computationally intensive and could risk spiking
system latency if all start executing immediately at the same time, there is a
`jitterSeconds` parameter that spreads out tasks over the given number of
seconds. This is used with DNS updates and commit log deletion.
`/_dr/cron/fanout` URL path. This action fans out a given cron task for each TLD
that exists in the registry system, using the queue that is specified in the XML
entry. Because some tasks may be computationally intensive and could risk
spiking system latency if all start executing immediately at the same time,
there is a `jitterSeconds` parameter that spreads out tasks over the given
number of seconds. This is used with DNS updates and commit log deletion.
The reason the `TldFanoutAction` exists is that a lot of tasks need to be done
separately for each TLD, such as RDE exports and NORDN uploads. It's simpler to
@@ -192,8 +133,7 @@ tasks retry in the face of transient errors.
The full list of URL parameters to `TldFanoutAction` that can be specified in
cron.xml is:
* `endpoint` -- The path of the action that should be executed (see
`web.xml`).
* `endpoint` -- The path of the action that should be executed
* `queue` -- The cron queue to enqueue tasks in.
* `forEachRealTld` -- Specifies that the task should be run in each TLD of
type `REAL`. This can be combined with `forEachTestTld`.
@@ -218,14 +158,14 @@ Each environment is thus completely independent.
The different environments are specified in `RegistryEnvironment`. Most
correspond to a separate App Engine app except for `UNITTEST` and `LOCAL`, which
by their nature do not use real environments running in the cloud. The
recommended naming scheme for the App Engine apps that has the best possible
compatibility with the codebase and thus requires the least configuration is to
pick a name for the production app and then suffix it for the other
environments. E.g., if the production app is to be named 'registry-platform',
then the sandbox app would be named 'registry-platform-sandbox'.
recommended project naming scheme that has the best possible compatibility with
the codebase and thus requires the least configuration is to pick a name for the
production app and then suffix it for the other environments. E.g., if the
production app is to be named 'registry-platform', then the sandbox app would be
named 'registry-platform-sandbox'.
The full list of environments supported out-of-the-box, in descending order from
real to not, is:
real to not-real, is:
* `PRODUCTION` -- The real production environment that is actually running
live TLDs. Since Nomulus is a shared registry platform, there need only ever
@@ -270,28 +210,28 @@ of experience running a production registry using this codebase.
## Cloud SQL
To be filled.
Nomulus uses [GCP Cloud SQL](https://cloud.google.com/sql) (Postgres) to store
information. For more information, see the
[DB project README file.](../db/README.md)
## Cloud Storage buckets
Nomulus uses [Cloud Storage](https://cloud.google.com/storage/) for bulk storage
of large flat files that aren't suitable for Cloud SQL. These files include
backups, RDE exports, and reports. Each bucket name must be unique across all of
Google Cloud Storage, so we use the common recommended pattern of prefixing all
buckets with the name of the App Engine app (which is itself globally unique).
Most of the bucket names are configurable, but the defaults are as follows, with
PROJECT standing in as a placeholder for the App Engine app name:
of large flat files that aren't suitable for SQL. These files include backups,
RDE exports, and reports. Each bucket name must be unique across all of Google
Cloud Storage, so we use the common recommended pattern of prefixing all buckets
with the name of the project (which is itself globally unique). Most of the
bucket names are configurable, but the most important / relevant defaults are:
* `PROJECT-billing` -- Monthly invoice files for each registrar.
* `PROJECT-commits` -- Daily exports of commit logs that are needed for
potentially performing a restore.
* `PROJECT-bsa` -- BSA data and output
* `PROJECT-domain-lists` -- Daily exports of all registered domain names per
TLD.
* `PROJECT-gcs-logs` -- This bucket is used at Google to store the GCS access
logs and storage data. This bucket is not required by the Registry system,
but can provide useful logging information. For instructions on setup, see
the [Cloud Storage
documentation](https://cloud.google.com/storage/docs/access-logs).
the
[Cloud Storage documentation](https://cloud.google.com/storage/docs/access-logs).
* `PROJECT-icann-brda` -- This bucket contains the weekly ICANN BRDA files.
There is no lifecycle expiration; we keep a history of all the files. This
bucket must exist for the BRDA process to function.
@@ -301,9 +241,3 @@ PROJECT standing in as a placeholder for the App Engine app name:
regularly uploaded to the escrow provider. Lifecycle is set to 90 days. The
bucket must exist.
* `PROJECT-reporting` -- Contains monthly ICANN reporting files.
* `PROJECT.appspot.com` -- Temporary MapReduce files are stored here. By
default, the App Engine MapReduce library places its temporary files in a
bucket named {project}.appspot.com. This bucket must exist. To keep
temporary files from building up, a 90-day or 180-day lifecycle should be
applied to the bucket, depending on how long you want to be able to go back
and debug MapReduce problems.

View File

@@ -8,61 +8,41 @@ requests will be authorized to invoke the action.
## Authentication and authorization properties
The `auth` attribute is an enumeration. Each value of the enumeration
corresponds to a triplet of properties:
corresponds to a pair of properties:
* the *authentication methods* allowed by the action
* the *minimum authentication level* which is authorized to run the action
* the *user policy* for the action
* the *minimum authentication level* which is authorized to run the action
* the *user policy* for the action
### Authentication methods
### Authentication Levels
Authentication methods are ways whereby the request can authenticate itself to
the system. In the code, an *authentication mechanism* is a class which handles
a particular authentication method. There are currently three methods:
There exist three authentication levels:
* `INTERNAL`: used by requests generated from App Engine task queues; these
requests do not have a user, because they are system-generated, so
authentication consists solely of verifying that the request did indeed
come from a task queue
* `NONE`: no authentication was found
* `APP`: the request was authenticated, but no user was present
* `USER`: the request was authenticated with a specific user
* `API`: authentication using an API; the Nomulus release ships with one API
authentication mechanism, OAuth 2, but you can write additional custom
mechanisms to handle other protocols if needed
`NONE` and `USER` are fairly straightforward results (either no authentication
was present, or a user was present), but `APP` is a bit of a special case. It
exists for requests coming from service accounts, Cloud Scheduler, or the
proxy -- requests which are authenticated but don't necessarily come from any
one particular "user" per se. That being said, authorized users *can* manually
run these tasks; it's just that service accounts can too.
* `LEGACY`: authentication using the standard App Engine `UserService` API,
which authenticates based on cookies and XSRF tokens
The details of the associated authentication mechanism classes are given later.
### Authentication levels
Each authentication method listed above can authenticate at one of three levels:
* `NONE`: no authentication was found
* `APP`: the request was authenticated, but no user was present
* `USER`: the request was authenticated with a specific user
For instance, `INTERNAL` authentication never returns an authentication level of
`USER`, because internal requests generated from App Engine task queues do not
execute as a particular end user account. `LEGACY` authentication, on the other
hand, never returns an authentication level of `APP`, because authentication is
predicated on identifying the user, so the only possible answers are `NONE` and
`USER`.
Each action has a minimum request authentication level. Some actions are
completely open to the public, and have a minimum level of `NONE`. Some require
authentication but not a user, and have a minimum level of `APP`. And some
cannot function properly without knowing the exact user, and have a minimum
level of `USER`.
Each action has a minimum request authentication level. Some actions (e.g. RDAP)
are completely open to the public, and have a minimum level of `NONE`. Some
require authentication but not necessarily a user, and have a minimum level of
`APP`. And some cannot function properly without knowing the exact user, and
have a minimum level of `USER`.
### User policy
The user policy indicates what kind of user is authorized to execute the action.
There are three possible values:
There are two possible values:
* `IGNORED`: the user information is ignored
* `PUBLIC`: an authenticated user is required, but any user will do
* `ADMIN`: there must be an authenticated user with admin privileges
* `PUBLIC`: an authenticated user is required, but any user will do
(authorization is done at a later state)
* `ADMIN`: there must be an authenticated user with admin privileges (this
includes service accounts)
Note that the user policy applies only to the automatic checking done by the
framework before invoking the action. The action itself may do more checking.
@@ -73,10 +53,6 @@ whether a user was found. If not, it issues a redirect to the login page.
Likewise, other pages of the registrar console have a user policy of `PUBLIC`,
meaning that any logged-in user can access the page. However, the code then
looks up the user to make sure he or she is associated with a registrar.
Admins can be granted permission to the registrar console by configuring a
special registrar for internal admin use, using the `registryAdminClientId`
setting. See the [global configuration
guide](./configuration.md#global-configuration) for more details.
Also note that the user policy only applies when there is actually a user. Some
actions can be executed either by an admin user or by an internal request coming
@@ -87,64 +63,41 @@ require that there be a user, set the minimum authentication level to `USER`.
### Allowed authentication and authorization values
Not all triplets of the authentication method, minimum level and user policy
make sense. A master enumeration lists all the valid triplets. They are:
There are three pairs of authentication level + user policy that are used in
Nomulus (or even make sense). These are:
* `AUTH_PUBLIC_ANONYMOUS`: Allow all access, and don't attempt to authenticate.
The only authentication method is `INTERNAL`, with a minimum level of
`NONE`. Internal requests will be flagged as such, but everything else
passes the authorization check with a value of `NOT_AUTHENTICATED`.
* `AUTH_PUBLIC`: Allow all access, but attempt to authenticate the user. All
three authentication methods are specified, with a minimum level of `NONE`
and a user policy of `PUBLIC`. If the user can be authenticated by any
means, the identity is passed to the request. But if not, the request still
passes the authorization check, with a value of `NOT_AUTHENTICATED`.
* `AUTH_PUBLIC_LOGGED_IN`: Allow access only by authenticated users. The
`API` and `LEGACY` authentication methods are supported, but not `INTERNAL`,
because that does not identify a user. The minimum level is `USER`, with a
user policy of `PUBLIC`. Only requests with a user authenticated via either
the legacy, cookie-based method or an API method (e.g. OAuth 2) are
authorized to run the action.
* `AUTH_INTERNAL_OR_ADMIN`: Allow access only by admin users or internal
requests. This is appropriate for actions that should only be accessed by
someone trusted (as opposed to anyone with a Google login). This currently
allows only the `INTERNAL` and `API` methods, meaning that an admin user
cannot authenticate themselves via the legacy authentication mechanism,
which is used only for the registrar console. The minimum level is `APP`,
because we don't require a user for internal requests, but the user policy
is `ADMIN`, meaning that if there *is* a user, it needs to be an admin.
* `AUTH_PUBLIC_OR_INTERNAL`: Allows anyone access, as long as they use OAuth to
authenticate. Also allows access from App Engine task-queue. Note that OAuth
client ID still needs to be allow-listed in the config file for OAuth-based
authentication to succeed. This is mainly used by the proxy.
* `AUTH_PUBLIC`: Allow all access and don't attempt to authenticate. This is
used for completely public endpoints such as RDAP.
* `AUTH_PUBLIC_LOGGED_IN`: Allow access only by users authenticated with some
type of OAuth token. This allows all users (`UserPolicy.PUBLIC`) but
requires that a particular user exists and is logged in (`AuthLevel.USER`).
This is used primarily for the registrar console.
* `AUTH_ADMIN`: Allow access only by admin users or internal requests
(including Cloud Scheduler tasks). This is appropriate for actions that
should only be accessed by someone trusted (as opposed to anyone with a
Google login). This permits app-internal authentication (`AuthLevel.APP`)
but if a user is present, it must be an admin (`UserPolicy.ADMIN`). This is
used by many automated requests, as well as the proxy.
### Action setting golden files
To make sure that the authentication and authorization settings are correct for
all actions, a unit test uses reflection to compare all defined actions for a
specific service to a golden file containing the correct settings. These files
are:
To make sure that the authentication and authorization settings are correct and
expected for all actions, a unit test uses reflection to compare all defined
actions for a specific service to a
[golden file](https://github.com/google/nomulus/blob/master/core/src/test/resources/google/registry/module/routing.txt)
containing the correct settings.
* `frontend_routing.txt` for the default (frontend) service
* `backend_routing.txt` for the backend service
* `tools_routing.txt` for the tools service
Each line in the file lists a path, the class that handles that path, the
allowable HTTP methods (meaning GET and POST, as opposed to the authentication
methods described above), the value of the `automaticallyPrintOk` attribute (not
relevant for purposes of this document), and the two authentication and
authorization settings described above. Whenever actions are added, or their
attributes are modified, the golden file needs to be updated.
Each of these files consists of lines listing a path, the class that handles
that path, the allowable HTTP methods (meaning GET and POST, as opposed to the
authentication methods described above), the value of the `automaticallyPrintOk`
attribute (not relevant for purposes of this document), and the three
authentication and authorization settings described above. Whenever actions are
added, or their attributes are modified, the golden files need to be updated.
The golden files also serve as a convenient place to check out how things are
set up. For instance, the tools actions are, for the most part, accessible to
admins and internal requests only. The backend actions are mostly accessible
only to internal requests. And the frontend actions are a grab-bag; some are
open to the public, some to any user, some only to admins, etc.
The golden file also serves as a convenient place to check out how things are
set up. For instance, the backend actions are accessible to admins and internal
requests only, the pubapi requests are open to the public, and console requests
require an authenticated user.
### Example
@@ -156,11 +109,12 @@ body rather than the URL itself (which could be logged). Therefore, the class
definition looks like:
```java
@Action(
path = "/_dr/epp",
method = Method.POST,
auth = Auth.AUTH_INTERNAL_OR_ADMIN
)
service = Action.Service.FRONTEND,
path = "/_dr/epp",
method = Method.POST,
auth = Auth.AUTH_ADMIN)
public class EppTlsAction implements Runnable {
...
```
@@ -169,8 +123,8 @@ and the corresponding line in frontend_routing.txt (including the header line)
is:
```shell
PATH CLASS METHODS OK AUTH_METHODS MIN USER_POLICY
/_dr/epp EppTlsAction POST n INTERNAL,API APP ADMIN
SERVICE PATH CLASS METHODS OK MIN USER_POLICY
FRONTEND /_dr/epp EppTlsAction POST n APP ADMIN
```
## Implementation
@@ -178,16 +132,12 @@ PATH CLASS METHODS OK AUTH_METHODS MIN USER_POLICY
The code implementing the authentication and authorization framework is
contained in the `google.registry.request.auth` package. The main method is
`authorize()`, in `RequestAuthenticator`. This method takes the auth settings
and an HTTP request, and tries to authenticate and authorize the request using
any of the specified methods, returning the result of its attempts. Note that
failed authorization (in which case `authorize()` returns `Optional.absent()`)
is different from the case where nothing can be authenticated, but the action
does not require any; in that case, `authorize()` succeeds, returning the
special result AuthResult.NOT_AUTHENTICATED.
There are separate classes (described below) for the mechanism which handles
each authentication method. The list of allowable API authentication mechanisms
(by default, just OAuth 2) is configured in `AuthModule`.
and an HTTP request, and tries to authenticate and authorize the request,
returning the result of its attempts. Note that failed authorization (in which
case `authorize()` returns `Optional.absent()`) is different from the case where
nothing can be authenticated, but the action does not require any; in that case,
`authorize()` succeeds, returning the special result
AuthResult.NOT_AUTHENTICATED.
The ultimate caller of `authorize()` is
`google.registry.request.RequestHandler`, which is responsible for routing
@@ -196,83 +146,51 @@ appropriate action, and making sure that the incoming HTTP method is appropriate
for the action, it calls `authorize()`, and rejects the request if authorization
fails.
### `LegacyAuthenticationMechanism`
### Authentication methods
Legacy authentication is straightforward, because the App Engine `UserService`
API does all the work. Because the protocol might be vulnerable to an XSRF
attack, the authentication mechanism issues and checks XSRF tokens as part
of the process if the HTTP method is not GET or HEAD.
Nomulus requests are authenticated via OIDC token authentication, though these
tokens can be created and validated in two ways. In each case, the
authentication mechanism converts an HTTP request to an authentication result,
which consists of an authentication level, a possible user object, and a
possible service account email.
### `OAuthAuthenticationMechanism`
#### `IapOidcAuthenticationMechanism`
OAuth 2 authentication is performed using the App Engine `OAuthService` API.
There are three Nomulus configuration values involved:
Most requests (e.g. the registrar console or Nomulus CLI requests) are routed
through GCP's
[Identity-Aware Proxy](https://docs.cloud.google.com/iap/docs/concepts-overview).
This forces the user to log in to some GAIA account (specifically, one that is
given access to the project). We attempt to validate a provided IAP OIDC token
with the IAP issuer URL (`https://cloud.google.com/iap`) and the proper IAP
audience (`/projects/{projectId}/global/backendServices/{serviceId}`), where
`projectId` refers to the GCP project, and `serviceId` refers to the service ID
retrievable from the
[IAP configuration page](https://pantheon.corp.google.com/security/iap).
Ideally, this service ID corresponds to the HTTPS load balancer that distributes
requests to the GKE pods.
* `availableOauthScopes` is the set of OAuth scopes passed to the service to
be checked for their presence.
Note: the local Nomulus CLI's
[LoginCommand](https://github.com/google/nomulus/blob/master/core/src/main/java/google/registry/tools/LoginCommand.java)
uses a special-case form of this where it saves long-lived IAP credentials
locally.
* `requiredOauthScopes` is the set of OAuth scopes which must be present. This
should be a subset of the available scopes. All scopes in this set must be
present for authentication to succeed.
#### `RegularOidcAuthenticationMechanism`
* `allowedOauthClientIds` is the set of allowable OAuth client IDs. Any client
ID in this set is sufficient for successful authentication.
Service account requests (e.g.
[Cloud Scheduler jobs](https://docs.cloud.google.com/scheduler/docs/schedule-run-cron-job))
or requests coming through the proxy use a non-IAP OIDC token provided by the
caller. These requests have a different issuer URL
(`https://accounts.google.com`) and use the fairly standard OAuth bearer token
architecture -- an `Authorization` HTTP header of the form "Bearer: XXXX".
The code looks for an `Authorization` HTTP header of the form "BEARER XXXX...",
containing the access token. If it finds one, it calls `OAuthService` to
validate the token, check that the scopes and client ID match, and retrieve the
flag indicating whether the user is an admin.
### Configuration
### `AppEngineInternalAuthenticationMechanism`
Detection of internal requests is a little hacky. App Engine uses a special HTTP
header, `X-AppEngine-QueueName`, to indicate the queue from which the request
originates. If this header is present, internal authentication succeeds. App
Engine normally strips this header from external requests, so only internal
requests will be authenticated.
App Engine has a special carve-out for admin users, who are allowed to specify
headers which do not get stripped. So an admin user can use a command-line
utility like `curl` to craft a request which appears to Nomulus to be an
internal request. This has proven to be useful, facilitating the testing of
actions which otherwise could only be run via a dummy cron job.
However, it only works if App Engine can authenticate the user as an admin via
the `UserService` API. OAuth won't work, because authentication is performed by
the Nomulus code, and the headers will already have been stripped by App Engine
before the request is executed. Only the legacy, cookie-based method will work.
Be aware that App Engine defines an "admin user" as anyone with access to the
App Engine project, even those with read-only access.
## Other topics
### OAuth 2 not supported for the registry console
Currently, OAuth 2 is only supported for requests which specify the
`Authorization` HTTP header. The OAuth code reads this header and passes it to
the Google OAuth server (no other authentication servers are currently
supported) to verify the user's identity. This works fine for the `nomulus`
command-line tool.
It doesn't work for browser-based interactions such as the registrar console.
For that, we will (we think) need to redirect the user to the authentication
server, and upon receiving the user back, fish out the code and convert it to a
token which we store in a cookie. None of this is particularly hard, but for the
moment it seems easier to stick with the legacy App Engine UserService API. Of
course, contributions from the open-source community are welcome. :)
### Authorization via `web.xml`
Before the modern authentication and authorization framework described in this
document was put in place, Nomulus used to be protected by directives in the
`web.xml` file which allowed only logged-in users to access most endpoints. This
had the advantage of being very easy to implement, but it came with some
drawbacks, the primary one being lack of support for OAuth 2. App Engine's
standard login detection works fine when using a browser, but does not handle
cases where the request is coming from a standalone program such as the
`nomulus` command-line tool. By moving away from the `web.xml` approach, we
gained more flexibility to support an array of authentication and authorization
schemes, including custom ones developed by the Nomulus community, at the
expense of having to perform the authentication and authorization ourselves in
the code.
The `auth` block of the configuration requires two fields:

*   `allowedServiceAccountEmails` is the list of service accounts that should be
    allowed to run tasks when internally authenticated. This will likely include
    whatever service account runs Nomulus in Google Kubernetes Engine, as well
    as the Cloud Scheduler service account.
*   `oauthClientId` is the OAuth client ID associated with IAP. This is
    retrievable from the
    [Clients page](https://pantheon.corp.google.com/auth/clients) of GCP after
    enabling the Identity-Aware Proxy. It should look something like
    `someNumbers-someNumbersAndLetters.apps.googleusercontent.com`

View File

@@ -3,54 +3,46 @@
This document contains information on the overall structure of the code, and how
particularly important pieces of the system are implemented.
## Bazel build system
## Gradle build system
[Bazel](https://www.bazel.io/) is used to build and test the Nomulus codebase.
[Gradle](https://gradle.org/) is used to build and test the Nomulus codebase.
Bazel builds are described using [BUILD
files](https://www.bazel.io/versions/master/docs/build-ref.html). A directory
containing a BUILD file defines a package consisting of all files and
directories underneath it, except those directories which themselves also
contain BUILD files. A package contains targets. Most targets in the codebase
are of the type `java_library`, which generates `JAR` files, or `java_test`,
which runs tests.
Nomulus, for the most part, uses fairly standard Gradle task naming for building
and running tests, with the various tasks defined in various `build.gradle`
files.
The key to Bazel's ability to create reproducible builds is the requirement that
each build target must declare its direct dependencies. Each of those
dependencies is a target, which, in turn, must also declare its dependencies.
This recursive description of a target's dependencies forms an acyclic graph
that fully describes the targets which must be built in order to build any
target in the graph.
Dependencies and their version restrictions are defined in the
`dependencies.gradle` file. Within each subproject's `build.gradle` file, the
actual dependencies used by that subproject are listed along with the type of
dependency (e.g. implementation, testImplementation). Versions of each
dependency are locked to avoid frequent dependency churn, with the locked
versions stored in the various `gradle.lockfile` files. To update these
versions, run any Gradle command (e.g. `./gradlew build`) with the
`--write-locks` argument.
A wrinkle in this system is managing external dependencies. Bazel was designed
first and foremost to manage builds where all code lives in a single source
repository and is compiled from `HEAD`. In order to mesh with other build and
packaging schemes, such as libraries distributed as compiled `JAR`s, Bazel
supports [external target
declarations](https://www.bazel.io/versions/master/docs/external.html#transitive-dependencies).
The Nomulus codebase uses external targets pulled in from Maven Central, these
are declared in `java/google/registry/repositories.bzl`. The dependencies of
these external targets are not managed by Bazel; you must manually add all of
the dependencies or use the
[generate_workspace](https://docs.bazel.build/versions/master/generate-workspace.html)
tool to do it.
### Generating WAR archives for deployment
### Generating EAR/WAR archives for deployment
The `jetty` project is the main entry point for building the Nomulus WAR files,
and one can use the `war` gradle task to build the base WAR file. The various
deployment/release files use Docker to deploy this, in a system that is too
Google-specialized to replicate directly here.
There are special build target types for generating `WAR` and `EAR` files for
deploying Nomulus to GAE. These targets, `zip_file` and `registry_ear_file` respectively, are used in `java/google/registry/BUILD`. To generate archives suitable for deployment on GAE:
## Subprojects
```shell
$ bazel build java/google/registry:registry_ear
...
bazel-genfiles/java/google/registry/registry.ear
INFO: Elapsed time: 0.216s, Critical Path: 0.00s
# This will also generate the per-module WAR files:
$ ls bazel-genfiles/java/google/registry/*.war
bazel-genfiles/java/google/registry/registry_backend.war
bazel-genfiles/java/google/registry/registry_default.war
bazel-genfiles/java/google/registry/registry_tools.war
```
Within the Nomulus repository there are a few notable subprojects:
* `util` contains tools that don't depend on any of our other code, e.g.
libraries or raw utilities
* `db` contains database-related code, managing the schema and
deployment/testing of the database.
* `integration` contains tests to make sure that schema rollouts won't break
Nomulus, that code versions and schema versions are cross-compatible
* `console-webapp` contains the Typescript/HTML/CSS/Angular code for the
registrar console frontend
* `proxy` contains code for the EPP proxy, which relays port 700 requests to
the core EPP services
* `core` contains the bulk of the core Nomulus code, including request
handling+serving, backend, actions, etc
## Cursors
@@ -72,8 +64,8 @@ The following cursor types are defined:
* **`RDE_UPLOAD`** - RDE (thick) escrow deposit upload
* **`RDE_UPLOAD_SFTP`** - Cursor that tracks the last time we talked to the
escrow provider's SFTP server for a given TLD.
* **`RECURRING_BILLING`** - Expansion of `BillingRecurrence` (renew) billing events
into one-time `BillingEvent`s.
* **`RECURRING_BILLING`** - Expansion of `BillingRecurrence` (renew) billing
events into one-time `BillingEvent`s.
* **`SYNC_REGISTRAR_SHEET`** - Tracks the last time the registrar spreadsheet
was successfully synced.
@@ -82,16 +74,9 @@ next timestamp at which an operation should resume processing and a `CursorType`
that identifies which operation the cursor is associated with. In many cases,
there are multiple cursors per operation; for instance, the cursors related to
RDE reporting, staging, and upload are per-TLD cursors. To accomplish this, each
`Cursor` also has a scope, a `Key<ImmutableObject>` to which the particular
cursor applies (this can be e.g. a `Registry` or any other `ImmutableObject` in
the database, depending on the operation). If the `Cursor` applies to the entire
registry environment, it is considered a global cursor and has a scope of
`EntityGroupRoot.getCrossTldKey()`.
Cursors are singleton entities by type and scope. The id for a `Cursor` is a
deterministic string that consists of the websafe string of the Key of the scope
object concatenated with the name of the name of the cursor type, separated by
an underscore.
`Cursor` also has a scope, a string to which the particular cursor applies (this
can be anything, but in practice is either a TLD or `GLOBAL` for cross-TLD
cursors). Cursors are singleton entities by type and scope.
## Guava
@@ -101,8 +86,7 @@ idiomatic, well-tested, and performant add-ons to the JDK. There are several
libraries in particular that you should familiarize yourself with, as they are
used extensively throughout the codebase:
* [Immutable
Collections](https://github.com/google/guava/wiki/ImmutableCollectionsExplained):
* [Immutable Collections](https://github.com/google/guava/wiki/ImmutableCollectionsExplained):
Immutable collections are a useful defensive programming technique. When an
Immutable collection type is used as a parameter type, it immediately
indicates that the given collection will not be modified in the method.
@@ -144,11 +128,10 @@ as follows:
* `Domain` ([RFC 5731](https://tools.ietf.org/html/rfc5731))
* `Host` ([RFC 5732](https://tools.ietf.org/html/rfc5732))
* `Contact` ([RFC 5733](https://tools.ietf.org/html/rfc5733))
All `EppResource` entities use a Repository Object Identifier (ROID) as its
unique id, in the format specified by [RFC
5730](https://tools.ietf.org/html/rfc5730#section-2.8) and defined in
unique id, in the format specified by
[RFC 5730](https://tools.ietf.org/html/rfc5730#section-2.8) and defined in
`EppResourceUtils.createRoid()`.
Each entity also tracks a number of timestamps related to its lifecycle (in
@@ -164,12 +147,9 @@ the status of a resource at a given point in time.
## Foreign key indexes
Foreign key indexes provide a means of loading active instances of `EppResource`
objects by their unique IDs:
* `Domain`: fully-qualified domain name
* `Contact`: contact id
* `Host`: fully-qualified host name
`Domain` and `Host` each are foreign-keyed, meaning we often wish to query them
by their foreign keys (fully-qualified domain name and fully-qualified host
name, respectively).
Since all `EppResource` entities are indexed on ROID (which is also unique, but
not as useful as the resource's name), the `ForeignKeyUtils` provides a way to
@@ -203,10 +183,9 @@ events that are recorded as history entries, including:
The full list is captured in the `HistoryEntry.Type` enum.
Each `HistoryEntry` has a parent `Key<EppResource>`, the EPP resource that was
mutated by the event. A `HistoryEntry` will also contain the complete EPP XML
command that initiated the mutation, stored as a byte array to be agnostic of
encoding.
Each `HistoryEntry` has a reference to a singular EPP resource that was mutated
by the event. A `HistoryEntry` will also contain the complete EPP XML command
that initiated the mutation, stored as a byte array to be agnostic of encoding.
A `HistoryEntry` also captures other event metadata, such as the `DateTime` of
the change, whether the change was created by a superuser, and the ID of the
@@ -215,9 +194,9 @@ registrar that sent the command.
## Poll messages
Poll messages are the mechanism by which EPP handles asynchronous communication
between the registry and registrars. Refer to [RFC 5730 Section
2.9.2.3](https://tools.ietf.org/html/rfc5730#section-2.9.2.3) for their protocol
specification.
between the registry and registrars. Refer to
[RFC 5730 Section 2.9.2.3](https://tools.ietf.org/html/rfc5730#section-2.9.2.3)
for their protocol specification.
Poll messages are stored by the system as entities in the database. All poll
messages have an event time at which they become active; any poll request before
@@ -245,8 +224,9 @@ poll messages are ACKed (and thus deleted) in `PollAckFlow`.
## Billing events
Billing events capture all events in a domain's lifecycle for which a registrar
will be charged. A `BillingEvent` will be created for the following reasons (the
full list of which is represented by `BillingEvent.Reason`):
will be charged. A one-time `BillingEvent` will (or can) be created for the
following reasons (the full list of which is represented by
`BillingBase.Reason`):
* Domain creates
* Domain renewals
@@ -254,19 +234,19 @@ full list of which is represented by `BillingEvent.Reason`):
* Server status changes
* Domain transfers
A `BillingBase` can also contain one or more `BillingBase.Flag` flags that
provide additional metadata about the billing event (e.g. the application phase
during which the domain was applied for).
All `BillingBase` entities contain a parent `VKey<HistoryEntry>` to identify the
mutation that spawned the `BillingBase`.
There are 4 types of billing events, all of which extend the abstract
`BillingBase` base class:
* **`BillingEvent`**, a one-time billing event.
* **`BillingRecurrence`**, a recurring billing event (used for events such as domain
renewals).
* **`BillingCancellation`**, which represents the cancellation of either a `OneTime`
or `BillingRecurrence` billing event. This is implemented as a distinct event to
preserve the immutability of billing events.
* **`BillingRecurrence`**, a recurring billing event (used for events such as
domain renewals).
* **`BillingCancellation`**, which represents the cancellation of either a
`BillingEvent` or `BillingRecurrence` billing event. This is implemented as
a distinct event to preserve the immutability of billing events.
A `BillingBase` can also contain one or more `BillingBase.Flag` flags that
provide additional metadata about the billing event (e.g. the application phase
during which the domain was applied for).
All `BillingBase` entities contain reference to a given ROID (`EppResource`
reference) to identify the mutation that spawned the `BillingBase`.

View File

@@ -1,82 +1,28 @@
# Coding FAQ
This file is a motley assortment of informational emails generated in response
to questions from development partners.
## How do I mock Google Cloud Storage in tests?
AppEngine's GCS client automatically switches over to a local implementation,
[`GcsServiceFactory`](https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/master/java/src/main/java/com/google/appengine/tools/cloudstorage/GcsServiceFactory.java#L61),
for tests.
So rather than mocking GCS-related stuff at all, just use the fake local
implementation. This is what our tests should be doing; see
[`ExportCommitLogDiffActionTest`](https://github.com/google/nomulus/blob/master/javatests/google/registry/backup/ExportCommitLogDiffActionTest.java#L70).
Very rarely there have been cases where we've needed something beyond that (e.g.
to test against GCS being eventually consistent). In that case, rather than
mocking GcsUtils, you'd need to create a real instance of it but pass in a
mocked-out GcsService instance. Those are a bit of a pain to make since
GcsServiceImpl itself is also final (and not code we control), but you could
roll your own implementation by hand, or cheat and use a reflective proxy, as we
do in
[`GcsDiffFileListerTest`](https://github.com/google/domain-registry/blob/master/javatests/google/registry/backup/GcsDiffFileListerTest.java#L112).
## How do I test authentication on the SDK Development Server?
*Can someone explain how `GaeUserIdConverter.convertEmailAddressToGaeUserId()`
actually does the conversion? I see it's doing a save/load(/delete) of a
`GaeUserIdConverter`, which contains a `User`. Does Objectify do some magic on
load to look up the real GAE user ID from the email address? In trying to get
the registry to run in the SDK Development Server, I am seeing the wrong user ID
when adding a new RegistryContact using the command line tool.*
The [App Engine development
server](https://cloud.google.com/appengine/docs/python/tools/using-local-server)
is not particularly robust; it appears that it always returns userId
185804764220139124118 for any authenticated user, as per [this StackOverflow
thread](http://stackoverflow.com/questions/30524328/what-user-is-provided-by-app-engine-devserver).
For testing purposes, it might suffice to just create a RegistrarContact with
that userId by hand somehow, so that you can log in. In the longer term, if we
switch to Google Sign-In, this specific problem would go away, but based on the
above, it looks like OAuthService doesn't really work on the dev server either.
So for testing actual "real" authentication, you'd want to use an alpha instance
rather than the development server. We don't use the development server very
much internally for this reason.
## Do you support RDAP?
We provide an implementation of the Registry Data Access Protocol (RDAP) which provides
similar data to the outdated WHOIS protocol, but in a structured format. The
standard is defined in STD 95 and its RFCs:
We provide an implementation of the Registry Data Access Protocol (RDAP) which
provides similar data to the outdated WHOIS protocol, but in a structured
format. The standard is defined in STD 95 and its RFCs:
* [RFC 7480: HTTP Usage in the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc7480)
* [RFC 7480: HTTP Usage in the Registration Data Access Protocol (RDAP)](https://tools.ietf.org/html/rfc7480)
* [RFC 7481: Security Services for the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc7481)
* [RFC 9082: Registration Data Access Protocol (RDAP) Query
Format](https://tools.ietf.org/html/rfc9082)
* [RFC 9083: JSON Responses for the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc9083)
* [RFC 9224: Finding the Authoritative Registration Data (RDAP)
Service](https://tools.ietf.org/html/rfc9224)
* [RFC 9082: Registration Data Access Protocol (RDAP) Query Format](https://tools.ietf.org/html/rfc9082)
* [RFC 9083: JSON Responses for the Registration Data Access Protocol (RDAP)](https://tools.ietf.org/html/rfc9083)
* [RFC 9224: Finding the Authoritative Registration Data (RDAP) Service](https://tools.ietf.org/html/rfc9224)
If you access this endpoint on a running Nomulus system:
`https://{PROJECT-ID}.appspot.com/rdap/domains?name=ex*`
`https://pubapi.{SERVER_URL}/rdap/domains?name=ex*`
it should search for all domains that start with "ex", returning the results in
JSON format. This functionality is still under development, so it is quite
possible that the format of returned data will change over time, but the basic
structure should be the same, as defined by RFCs 7480 through 7484. Request
paths which ought to mostly work (though no guarantees yet):
JSON format. Request paths which ought to work:
```
/rdap/domain/abc.tld
/rdap/nameserver/ns1.abc.tld
/rdap/entity/ROID
/rdap/entity/registrar-iana-identifier
/rdap/domains?name=abc.tld
/rdap/domains?name=abc*
@@ -86,8 +32,6 @@ paths which ought to mostly work (though no guarantees yet):
/rdap/domains?nsIp=1.2.3.4
/rdap/nameservers?name=ns*.abc.tld
/rdap/nameservers?ip=1.2.3.4
/rdap/entities?fn=John*
/rdap/entities?handle=ROI*
/rdap/entities?handle=registrar-iana-identifier
```

View File

@@ -2,10 +2,11 @@
There are multiple different kinds of configuration that go into getting a
working registry system up and running. Broadly speaking, configuration works in
two ways -- globally, for the entire sytem, and per-TLD. Global configuration is
managed by editing code and deploying a new version, whereas per-TLD
configuration is data that lives in the database in `Tld` entities, and is
updated by running `nomulus` commands without having to deploy a new version.
two ways -- globally, for the entire system, and per-TLD. Global configuration
is managed by editing code and deploying a new version, whereas per-TLD
configuration is data that lives in the database in `Tld` entities, and
[is updated](operational-procedures/modifying-tlds.md) without having to deploy
a new version.
## Initial configuration
@@ -23,40 +24,14 @@ Before getting into the details of configuration, it's important to note that a
lot of configuration is environment-dependent. It is common to see `switch`
statements that operate on the current `RegistryEnvironment`, and return
different values for different environments. This is especially pronounced in
the `UNITTEST` and `LOCAL` environments, which don't run on App Engine at all.
As an example, some timeouts may be long in production and short in unit tests.
the `UNITTEST` and `LOCAL` environments, which don't run on GCP at all. As an
example, some timeouts may be long in production and short in unit tests.
See the [Architecture documentation](./architecture.md) for more details on
environments as used by Nomulus.
## App Engine configuration
App Engine configuration isn't covered in depth in this document as it is
thoroughly documented in the [App Engine configuration docs][app-engine-config].
The main files of note that come pre-configured in Nomulus are:
* `cron.xml` -- Configuration of cronjobs
* `web.xml` -- Configuration of URL paths on the webserver
* `appengine-web.xml` -- Overall App Engine settings including number and type
of instances
* `cloud-scheduler-tasks.xml` -- Configuration of Cloud Scheduler Tasks
* * `cloud-tasks-queue.xml` -- Configuration of Cloud Tasks Queue
* `application.xml` -- Configuration of the application name and its services
Cron, web, and queue are covered in more detail in the "App Engine architecture"
doc, and the rest are covered in the general App Engine documentation.
If you are not writing new code to implement custom features, is unlikely that
you will need to make any modifications beyond simple changes to
`application.xml` and `appengine-web.xml`. If you are writing new features, it's
likely you'll need to add cronjobs, URL paths, and task queues, and thus edit
those associated XML files.
The existing codebase is configured for running a full-scale registry with
multiple TLDs. In order to deploy to App Engine, you will either need to
[increase your quota](https://cloud.google.com/compute/quotas#requesting_additional_quota)
to allow for at least 100 running instances or reduce `max-instances` in the
backend `appengine-web.xml` files to 25 or less.
TODO: documentation about how to set up GKE and what config points are necessary
to modify there
## Global configuration
@@ -65,9 +40,9 @@ deployed in the app. The full list of config options and their default values
can be found in the [`default-config.yaml`][default-config] file. If you wish to
change any of these values, do not edit this file. Instead, edit the environment
configuration file named
`google/registry/config/files/nomulus-config-ENVIRONMENT.yaml`, overriding only
the options you wish to change. Nomulus ships with blank placeholders for all
standard environments.
`core/src/main/java/google/registry/config/files/nomulus-config-ENVIRONMENT.yaml`,
overriding only the options you wish to change. Nomulus ships with blank
placeholders for all standard environments.
You will not need to change most of the default settings. Here is the subset of
settings that you will need to change for all deployed environments, including
@@ -75,52 +50,65 @@ development environments. See [`default-config.yaml`][default-config] for a full
description of each option:
```yaml
appEngine:
projectId: # Your App Engine project ID
toolsServiceUrl: https://tools-dot-PROJECT-ID.appspot.com # Insert your project ID
isLocal: false # Causes saved credentials to be used.
gcpProject:
projectId: # Your GCP project ID
projectIdNumber: # The corresponding ID number, found on the home page
locationId: # e.g. us-central1
isLocal: false # Causes saved credentials to be used
baseDomain: # the base domain from which the registry will be served, e.g. registry.google
gSuite:
domainName: # Your G Suite domain name
adminAccountEmailAddress: # An admin login for your G Suite account
domainName: # Your GSuite domain name, likely same as baseDomain above
adminAccountEmailAddress: # An admin login for your GSuite account
auth:
allowedServiceAccountEmails:
- # a list of service account emails given access to Nomulus
oauthClientId: # the client ID of the Identity-Aware Proxy
cloudSql:
jdbcUrl: # path to the Postgres server
```
For fully-featured production environments that need the full range of features
(e.g. RDE, correct contact information on the registrar console, etc.) you will
need to specify more settings.
need to specify *many* more settings.
From a code perspective, all configuration settings ultimately come through the
[`RegistryConfig`][registry-config] class. This includes a Dagger module called
`ConfigModule` that provides injectable configuration options. While most
configuration options can be changed from within the yaml config file, certain
derived options may still need to be overriden by changing the code in this
derived options may still need to be overridden by changing the code in this
module.
## OAuth 2 client id configuration
## OAuth 2 client ID configuration
The open source Nomulus release uses OAuth 2 to authenticate and authorize
users. This includes the `nomulus` tool when it connects to the system to
execute commands. OAuth must be configured before you can use the `nomulus` tool
to set up the system.
Nomulus uses OAuth 2 to authenticate and authorize users. This includes the
`nomulus` [command-line tool](admin-tool.md) when it connects to the system to
execute commands as well as the
[Identity-Aware Proxy](https://console.cloud.google.com/security/iap) used to
authenticate standard requests. OAuth must be configured before you can use
either system.
OAuth defines the concept of a *client id*, which identifies the application
OAuth defines the concept of a *client ID*, which identifies the application
which the user wants to authorize. This is so that, when a user clicks in an
OAuth permission dialog and grants access to data, they are not granting access
to every application on their computer (including potentially malicious ones),
but only to the application which they agree needs access. Each environment of
the Nomulus system should have its own client id. Multiple installations of the
`nomulus` tool application can share the same client id for the same
environment.
the Nomulus system should have its own pair of client IDs. Multiple
installations of the `nomulus` tool application can share the same client ID for
the same environment.
There are three steps to configuration.
For the Nomulus tool OAuth configuration, do the following steps:
* **Create the client id in App Engine:** Go to your project's
* **Create the registry tool client ID in GCP:** Go to your project's
["Credentials" page](https://console.developers.google.com/apis/credentials)
in the Developer's Console. Click "Create credentials" and select "OAuth
client ID" from the dropdown. In the create credentials window, select an
application type of "Desktop app". After creating the client id, copy the
client id and client secret which are displayed in the popup window. You may
also obtain this information by downloading the json file for the client id.
application type of "Desktop app". After creating the client ID, copy the
client ID and client secret which are displayed in the popup window. You may
also obtain this information by downloading the JSON file for the client ID
* **Copy the client secret information to the config file:** The *client
secret file* contains both the client ID and the client secret. Copy the
@@ -129,18 +117,21 @@ There are three steps to configuration.
`registryTool` section. This will make the `nomulus` tool use this
credential to authenticate itself to the system.
* **Add the new client id to the configured list of allowed client ids:** The
configuration files include an `oAuth` section, which defines a parameter
called `allowedOauthClientIds`, specifying a list of client ids which are
permitted to connect. Add the client ID to the list. You will need to
rebuild and redeploy the project so that the configuration changes take
effect.
For IAP configuration, do the following steps:

*   **Create the IAP client ID:** Follow similar steps from above to create an
    additional OAuth client ID, but using an application type of "Web
    application". Note the client ID and secret.
*   **Enable IAP for your HTTPS load balancer:** On the
    [IAP page](https://console.cloud.google.com/security/iap), enable IAP for
    all of the backend services that all use the same HTTPS load balancer.
*   **Use a custom OAuth configuration:** For the backend services, under the
    "Settings" section (in the three-dot menu) enable custom OAuth and insert
    the client ID and secret that we just created.
*   **Save the client ID:** In the configuration file, save the client ID as
    `oauthClientId` in the `auth` section.
Once these steps are taken, the `nomulus` tool will use a client id which the
server is configured to accept, and authentication should succeed. Note that
many Nomulus commands also require that the user have App Engine admin
privileges, meaning that the user needs to be added as an owner or viewer of the
App Engine project.
Once these steps are taken, the `nomulus` tool and IAP will both use client IDs
which the server is configured to accept, and authentication should succeed.
Note that many Nomulus commands also require that the user have GCP admin
privileges on the project in question.
## Sensitive global configuration
@@ -151,8 +142,8 @@ control mishap. We use a secret store to persist these values in a secure
manner, which is backed by the GCP Secret Manager.
The `Keyring` interface contains methods for all sensitive configuration values,
which are primarily credentials used to access various ICANN and ICANN-
affiliated services (such as RDE). These values are only needed for real
which are primarily credentials used to access various ICANN and
ICANN-affiliated services (such as RDE). These values are only needed for real
production registries and PDT environments. If you are just playing around with
the platform at first, it is OK to put off defining these values until
necessary. This allows the codebase to start and run, but of course any actions
@@ -169,16 +160,16 @@ ${KEY_NAME}`.
## Per-TLD configuration
`Tld` entities, which are persisted to the database, are used for per-TLD
configuration. They contain any kind of configuration that is specific to a TLD,
such as the create/renew price of a domain name, the pricing engine
implementation, the DNS writer implementation, whether escrow exports are
enabled, the default currency, the reserved label lists, and more. The `nomulus
update_tld` command is used to set all of these options. See the
[admin tool documentation](./admin-tool.md) for more information, as well as the
command-line help for the `update_tld` command. Unlike global configuration
above, per-TLD configuration options are stored as data in the running system,
and thus do not require code pushes to update.
`Tld` entities, which are persisted to the database and stored in YAML files,
are used for per-TLD configuration. They contain any kind of configuration that
is specific to a TLD, such as the create/renew price of a domain name, the
pricing engine implementation, the DNS writer implementation, whether escrow
exports are enabled, the default currency, the reserved label lists, and more.
To create or update TLDs, we use
[YAML files](operational-procedures/modifying-tlds.md) and the `nomulus
configure_tld` command. Because the TLDs are stored as data in the running
system, they do not require code pushes to update.
[app-engine-config]: https://cloud.google.com/appengine/docs/java/configuration-files
[default-config]: https://github.com/google/nomulus/blob/master/java/google/registry/config/files/default-config.yaml
@@ -242,7 +233,7 @@ connectionName: your-project:us-central1:nomulus
Use the `update_keyring_secret` command to update the `SQL_PRIMARY_CONN_NAME`
key with the connection name. If you have created a read-replica, update the
`SQL_REPLICA_CONN_NAME` key with the replica's connection time.
`SQL_REPLICA_CONN_NAME` key with the replica's connection name.
### Installing the Schema
@@ -334,6 +325,17 @@ $ gcloud sql connect nomulus --user=nomulus
From this, you should have a postgres prompt and be able to enter the "GRANT"
command specified above.
### Replication and Backups
We highly recommend creating a read-only replica of the database and setting the
previously-mentioned `SQL_REPLICA_CONN_NAME` keyring value to the name of that
replica. By doing so, you can remove some load from the primary database.
We also recommend enabling
[point-in-time recovery](https://docs.cloud.google.com/sql/docs/postgres/backup-recovery/pitr)
for the instance, just in case something bad happens and you need to restore
from a backup.
### Cloud SecretManager
You'll need to enable the SecretManager API in your project.

View File

@@ -1,37 +0,0 @@
# Developing
This document contains advice on how to do development on the Nomulus codebase,
including how to set up an IDE environment and run tests.
## Running a local development server
`RegistryTestServer` is a lightweight test server for the registry that is
suitable for running locally for development. It uses local versions of all
Google Cloud Platform dependencies, when available. Correspondingly, its
functionality is limited compared to a Nomulus instance running on an actual App
Engine instance. It is most helpful for doing web UI development such as on the
registrar console: it allows you to update JS, CSS, images, and other front-end
resources, and see the changes instantly simply by refreshing the relevant page
in your browser.
To see the registry server's command-line parameters, run:
```shell
$ bazel run //javatests/google/registry/server -- --help
```
To start an instance of the server, run:
```shell
$ bazel run //javatests/google/registry/server {your params}
```
Once it is running, you can interact with it via normal `nomulus` commands, or
view the registrar console in a web browser by navigating to
[http://localhost:8080/registrar](http://localhost:8080/registrar). The server
will continue running until you terminate the process.
If you are adding new URL paths, or new directories of web-accessible resources,
you will need to make the corresponding changes in `RegistryTestServer`. This
class contains all of the routing and static file information used by the local
development server.

View File

@@ -3,8 +3,8 @@
This document covers the first steps of creating some test entities in a newly
deployed and configured testing environment. It isn't required, but it does help
gain familiarity with the system. If you have not already done so, you must
first complete [installation](./install.md) and [initial
configuration](./configuration.md).
first complete [installation](./install.md) and
[initial configuration](./configuration.md).
Note: Do not create these entities on a production environment! All commands
below use the [`nomulus` admin tool](./admin-tool.md) to interact with the
@@ -12,55 +12,30 @@ running registry system. We'll assume that all commands below are running in the
`alpha` environment; if you named your environment differently, then use that
everywhere that `alpha` appears.
## Temporary extra steps
Using the `nomulus` admin tool currently requires two additional steps to enable
full functionality. These steps should _not_ be done for a production
deployment - a suitable solution for production is in progress.
Modify the `tools` module `web.xml` file to remove admin-only restrictions.
Look for the `<auth-constraint>admin</auth-constraint>` element. Comment out
this element, and redeploy the tools module to your live app.
[app-default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
## Create a TLD
Pick the name of a TLD to create. For the purposes of this example we'll use
"example", which conveniently happens to be an ICANN reserved string, meaning
it'll never be created for real on the Internet at large.
it'll never be created for real on the Internet at large. Then,
[create a TLD](operational-procedures/modifying-tlds.md) using the
[example template](https://github.com/google/nomulus/blob/master/core/src/test/resources/google/registry/tools/tld.yaml)
as a guide.
The fields you'll want to change from the template:

*   `driveFolderId` should be null
*   `roidSuffix` should be `EXAMPLE` -- this is the suffix that will be used for
    repository ids of domains on the TLD. This suffix must be all uppercase and
    a maximum of eight ASCII characters and can be set to the upper-case
    equivalent of our TLD name (if it is 8 characters or fewer), such as
    "EXAMPLE." You can also abbreviate the upper-case TLD name down to 8
    characters. Refer to the
    [gTLD Registry Advisory: Correction of non-compliant ROIDs][roids] for
    further information.
*   `tldStr` should be `example`
*   `tldType` should be `TEST`, which identifies that the TLD is for testing
    purposes, whereas `REAL` would identify the TLD as a live TLD.
```shell
$ nomulus -e alpha create_tld example --roid_suffix EXAMPLE \
--initial_tld_state GENERAL_AVAILABILITY --tld_type TEST \
--dns_writers VoidDnsWriter
[ ... snip confirmation prompt ... ]
Perform this command? (y/N): y
Updated 1 entities.
$ nomulus -e alpha configure_tld --input=example.yaml
```
* `-e` is the environment name (`alpha` in this example).
* `create_tld` is the subcommand to create a TLD. The TLD name is "example"
which happens to be an ICANN reserved string, and therefore "example" can
never be created on the Internet at large.
* `--initial_tld_state` defines the initial state of the TLD.
`GENERAL_AVAILABILITY`, in the case of our example, allows you to
immediately create domain names by bypassing the sunrise and landrush domain
registration periods.
* `--tld_type` is the type of TLD. `TEST` identifies that the TLD is for
testing purposes, where `REAL` identifies the TLD is a live TLD.
* `roid_suffix` is the suffix that will be used for repository ids of domains
on the TLD. This suffix must be all uppercase and a maximum of eight ASCII
characters and can be set to the upper-case equivalent of our TLD name (if
it is 8 characters or fewer), such as "EXAMPLE." You can also abbreviate the
upper-case TLD name down to 8 characters. Refer to the [gTLD Registry
Advisory: Correction of non-compliant ROIDs][roids] for further information.
* `--dns_writers` is the list of DNS writer modules that specify how changes
to domains for the TLD are communicated to actual DNS servers. We use
`VoidDnsWriter` in this case so as to not have to set up DNS. Typically
one might use CloudDnsWriter (for Google Cloud DNS) or implement your own
solution.
## Create a registrar
Now we need to create a registrar and give it access to operate on the example
@@ -96,36 +71,6 @@ Where:
* `--allowed_tlds` is a comma-delimited list of top level domains where this
registrar has access.
## Create a contact
Now we want to create a contact, as a contact is required before a domain can be
created. Contacts can be used on any number of domains across any number of
TLDs, and contain the information on who owns or provides technical support for
a TLD. These details will appear in WHOIS queries.
```shell
$ nomulus -e alpha create_contact -c acme --id abcd1234 \
--name 'John Smith' --street '234 Fake St' --city 'North Fakington' \
--state MA --zip 23456 --cc US --email jsmith@e.mail
[ ... snip EPP response ... ]
```
Where:
* `create_contact` is the subcommand to create a contact.
* `-c` is used to define the registrar. The `-c` option is used with most
`registry_tool` commands to specify the id of the registrar executing the
command. Contact, domain, and host creation all work by constructing an EPP
message that is sent to the registry, and EPP commands need to run under the
context of a registrar. The "acme" registrar that was created above is used
for this purpose.
* `--id` is the contact id, and is referenced elsewhere in the system (e.g.
when a domain is created and the admin contact is specified).
* `--name` is the display name of the contact, which is usually the name of a
company or of a person.
The address and `email` fields are required to create a contact.
## Create a host
Hosts are used to specify the IP addresses (either v4 or v6) that are associated
@@ -156,8 +101,7 @@ To tie it all together, let's create a domain name that uses the above contact
and host.
```shell
$ nomulus -e alpha create_domain fake.example --client acme --admins abcd1234 \
--techs abcd1234 --registrant abcd1234 --nameservers ns1.google.com
$ nomulus -e alpha create_domain fake.example --client acme --nameservers ns1.google.com
[ ... snip EPP response ... ]
```
@@ -166,26 +110,19 @@ Where:
* `create_domain` is the subcommand to create a domain name. It accepts a
whitespace-separated list of domain names to be created
* `--client` is used to define the registrar.
* `--admins` is the administrative contact's id(s).
* `--techs` is the technical contact's id(s).
* `--registrant` is the registrant contact's id.
* `--nameservers` is a comma-separated list of hosts.
Note how the same contact id is used for the administrative, technical, and
registrant contact. It is common for domain names to use the same details for
all contacts on a domain name.
## Verify test entities using RDAP
## Verify test entities using WHOIS
To verify that everything worked, let's query the WHOIS information for
To verify that everything worked, let's query the RDAP information for
fake.example:
```shell
$ nomulus -e alpha whois_query fake.example
[ ... snip WHOIS response ... ]
$ nomulus -e alpha rdap_query fake.example
[ ... snip RDAP response ... ]
```
You should see all of the information in WHOIS that you entered above for the
contact, nameserver, and domain.
You should see all the information in RDAP that you entered above for the
nameserver and domain.
[roids]: https://www.icann.org/resources/pages/correction-non-compliant-roids-2015-08-26-en

View File

@@ -22,15 +22,17 @@ To upgrade to a new Gradle version for this project, use:
gradle wrapper --gradle-version version-number
```
## Deploy to AppEngine
## Deploy to GCP Test Projects
Use the Gradle task 'appengineDeploy' to build and deploy to AppEngine. For now
you must update the appengine.deploy.project in build.gradle to your
GCP project ID.
If your [configuration](configuration.md) is up to date with the proper test
projects configured, you can deploy to GCP through the Gradle command line.
To deploy the Gradle build, you will need the Google Cloud SDK and its
app-engine-java component.
Use the Gradle task `deployNomulus` to build and deploy to a GCP test project
providing the test project as an argument, e.g.
```shell
./gradlew deployNomulus -Penvironment=alpha
```
### Notable Issues
@@ -40,13 +42,8 @@ is easier to exclude the suite classes than individual test classes. This is the
reason why all test tasks in the :core project contain the exclude pattern
'"**/*TestCase.*", "**/*TestSuite.*"'
Many Nomulus tests are not hermetic: they modify global state, but do not clean
Some Nomulus tests are not hermetic: they modify global state, but do not clean
up on completion. This becomes a problem with Gradle. In the beginning we forced
Gradle to run every test class in a new process, and incurred heavy overheads.
Since then, we have fixed some tests, and managed to divide all tests into three
suites that do not have intra-suite conflicts. We will revisit the remaining
tests soon.
Note that it is unclear if all conflicting tests have been identified. More may
be exposed if test execution order changes, e.g., when new tests are added or
execution parallelism level changes.
Since then, we have fixed some tests, and managed to divide all tests into two
suites that do not have intra-suite conflicts (`fragileTest` and `standardTest`)

View File

@@ -6,48 +6,41 @@ This document covers the steps necessary to download, build, and deploy Nomulus.
You will need the following programs installed on your local machine:
* A recent version of the [Java 11 JDK][java-jdk11].
* [Google App Engine SDK for Java][app-engine-sdk], and configure aliases to the `gcloud` and `appcfg.sh` utilities (
you'll use them a lot).
* [Git](https://git-scm.com/) version control system.
* Docker (confirm with `docker info` no permission issues, use `sudo groupadd docker` for sudoless docker).
* Python version 3.7 or newer.
* gnupg2 (e.g. in run `sudo apt install gnupg2` in Debian-like Linuxes)
* A recent version of the [Java 21 JDK][java-jdk21].
* The [Google Cloud CLI](https://docs.cloud.google.com/sdk/docs/install-sdk)
(configure an alias to the `gcloud` utility, because you'll use it a lot)
* [Git](https://git-scm.com/) version control system.
* Docker (confirm with `docker info` no permission issues, use `sudo groupadd
docker` for sudoless docker).
* Python version 3.7 or newer.
* gnupg2 (e.g. run `sudo apt install gnupg2` in Debian-like Linuxes)
**Note:** App Engine does not yet support Java 9. Also, the instructions in this
document have only been tested on Linux. They might work with some alterations
on other operating systems.
**Note:** The instructions in this document have only been tested on Linux. They
might work with some alterations on other operating systems.
## Download the codebase
Start off by using git to download the latest version from the [Nomulus GitHub
page](https://github.com/google/nomulus). You may checkout any of the daily
tagged versions (e.g. `nomulus-20200629-RC00`), but in general it is also
safe to simply checkout from HEAD:
Start off by using git to download the latest version from the
[Nomulus GitHub page](https://github.com/google/nomulus). You may check out any
of the daily tagged versions (e.g. `nomulus-20260101-RC00`), but in general it
is also safe to simply check out from HEAD:
```shell
$ git clone git@github.com:google/nomulus.git
Cloning into 'nomulus'...
[ .. snip .. ]
$ cd nomulus
$ ls
apiserving CONTRIBUTORS java LICENSE scripts
AUTHORS docs javascript python third_party
CONTRIBUTING.md google javatests README.md WORKSPACE
```
Most of the directory tree is organized into gradle sub-projects (see
`settings.gradle` for details). The following other top-level directories are
Most of the directory tree is organized into gradle subprojects (see
`settings.gradle` for details). The following other top-level directories are
also defined:
* `buildSrc` -- Gradle extensions specific to our local build and release
methodology.
* `config` -- Tools for build and code hygiene.
* `docs` -- The documentation (including this install guide)
* `gradle` -- Configuration and code managed by the gradle build system.
* `gradle` -- Configuration and code managed by the Gradle build system.
* `integration` -- Testing scripts for SQL changes.
* `java-format` -- The Google java formatter and wrapper scripts to use it
incrementally.
* `python` -- Some Python reporting scripts
* `release` -- Configuration for our continuous integration process.
## Build the codebase
@@ -56,34 +49,29 @@ The first step is to build the project, and verify that this completes
successfully. This will also download and install dependencies.
```shell
$ ./nom_build build
$ ./gradlew build
Starting a Gradle Daemon (subsequent builds will be faster)
Plugins: Using default repo...
> Configure project :buildSrc
Java dependencies: Using Maven central...
[ .. snip .. ]
```
The `nom_build` script is just a wrapper around `gradlew`. Its main
additional value is that it formalizes the various properties used in the
build as command-line flags.
The "build" command builds all the code and runs all the tests. This will take a
while.
The "build" command builds all of the code and runs all of the tests. This
will take a while.
## Create and configure a GCP project
## Create an App Engine project
First, [create an
application](https://cloud.google.com/appengine/docs/java/quickstart) on Google
Cloud Platform. Make sure to choose a good Project ID, as it will be used
repeatedly in a large number of places. If your company is named Acme, then a
good Project ID for your production environment would be "acme-registry". Keep
First,
[create an application](https://cloud.google.com/appengine/docs/java/quickstart)
on Google Cloud Platform. Make sure to choose a good Project ID, as it will be
used repeatedly in a large number of places. If your company is named Acme, then
a good Project ID for your production environment would be "acme-registry". Keep
in mind that project IDs for non-production environments should be suffixed with
the name of the environment (see the [Architecture
documentation](./architecture.md) for more details). For the purposes of this
example we'll deploy to the "alpha" environment, which is used for developer
testing. The Project ID will thus be `acme-registry-alpha`.
the name of the environment (see the
[Architecture documentation](./architecture.md) for more details). For the
purposes of this example we'll deploy to the "alpha" environment, which is used
for developer testing. The Project ID will thus be `acme-registry-alpha`.
Now log in using the command-line Google Cloud Platform SDK and set the default
project to be this one that was newly created:
@@ -96,6 +84,17 @@ You are now logged in as [user@email.tld].
$ gcloud config set project acme-registry-alpha
```
And make sure the required APIs are enabled in the project:
```shell
$ gcloud services enable \
container.googleapis.com \
artifactregistry.googleapis.com \
sqladmin.googleapis.com \
secretmanager.googleapis.com \
compute.googleapis.com
```
Now modify `projects.gradle` with the name of your new project:
<pre>
@@ -106,42 +105,51 @@ rootProject.ext.projects = ['production': 'your-production-project',
'crash' : 'your-crash-project']
</pre>
Next follow the steps in [configuration](./configuration.md) to configure the
complete system or, alternately, read on for an initial deploy in which case
you'll need to deploy again after configuration.
#### Create GKE Clusters
## Deploy the code to App Engine
We recommend Standard clusters with Workload Identity enabled to allow pods to
securely access Cloud SQL and Secret Manager. Feel free to adjust the numbers
and sizing as desired.
AppEngine deployment with gradle is straightforward:
```shell
$ gcloud container clusters create nomulus-cluster \
--region=$REGION \
--workload-pool=$PROJECT_ID.svc.id.goog \
--num-nodes=3 \
--enable-ip-alias
$ gcloud container clusters create proxy-cluster \
--region=$REGION \
--workload-pool=$PROJECT_ID.svc.id.goog \
--num-nodes=3 \
--enable-ip-alias
```
$ ./nom_build appengineDeploy --environment=alpha
Then create an artifact repository:

```shell
$ gcloud artifacts repositories create nomulus-repo \
    --repository-format=docker \
    --location=$REGION \
    --description="Nomulus Docker images"
```
To verify successful deployment, visit
https://acme-registry-alpha.appspot.com/registrar in your browser (adjusting
appropriately for the project ID that you actually used). If the project
deployed successfully, you'll see a "You need permission" page indicating that
you need to configure the system and grant access to your Google account. It's
time to go to the next step, configuration.
See the files and documentation in the `release/` folder for more information on
the release process. You will likely need to customize the internal build
process for your own setup, including internal repository management, builds,
and where Nomulus is deployed.
Configuration is handled by editing code, rebuilding the project, and deploying
again. See the [configuration guide](./configuration.md) for more details.
Once you have completed basic configuration (including most critically the
project ID, client id and secret in your copy of the `nomulus-config-*.yaml`
files), you can rebuild and start using the `nomulus` tool to create test
entities in your newly deployed system. See the [first steps tutorial](./first-steps-tutorial.md)
again. See the [configuration guide](./configuration.md) for more details. Once
you have completed basic configuration (including most critically the project
ID, client id and secret in your copy of the `nomulus-config-*.yaml` files), you
can rebuild and start using the `nomulus` tool to create test entities in your
newly deployed system. See the [first steps tutorial](./first-steps-tutorial.md)
for more information.
[app-engine-sdk]: https://cloud.google.com/appengine/docs/java/download
[java-jdk11]: https://www.oracle.com/java/technologies/javase-downloads.html
[java-jdk21]: https://www.oracle.com/java/technologies/javase-downloads.html
## Deploy the BEAM Pipelines
## Deploy the Beam Pipelines
Nomulus is in the middle of migrating all pipelines to use flex-template. For
pipelines already based on flex-template, deployment in the testing environments
Deployment of the Beam pipelines to Cloud Dataflow in the testing environments
(alpha and crash) can be done using the following command:
```shell
./nom_build :core:stageBeamPipelines --environment=alpha
./gradlew :core:stageBeamPipelines -Penvironment=alpha
```
Pipeline deployment in other environments are through CloudBuild. Please refer

34
docs/local-testing.md Normal file
View File

@@ -0,0 +1,34 @@
# Local Testing
## Running a local development server
Nomulus provides a `RegistryTestServer` that is a lightweight test server
suitable for running local development. It uses local versions of all Google
Cloud Platform dependencies when available. Correspondingly, it is primarily
useful for doing web UI development (i.e. the registrar console). It allows you
to update Typescript, HTML, and CSS and see the changes simply by refreshing the
relevant page in your browser.
In order to serve content locally, there are two services that must be run:

* the `RegistryTestServer` to serve as the backing server
* the Angular service to provide the UI files
In order to do this in one step, from the `console-webapp` folder, run:
```shell
$ npm install
$ npm run start:dev
```
This will start both the `RegistryTestServer` and the Angular testing service.
Any changes to Typescript/HTML/CSS files will be recompiled and available on
page reload.
Once it is running, you can interact with the console by going to
`http://localhost:4200` to view the registrar console in a web browser. The
server will continue running until you terminate the process.
If you are adding new URL paths, or new directories of web-accessible resources,
you will need to make the corresponding changes in `RegistryTestServer`. This
class contains all the routing and static file information used by the local
development server.

View File

@@ -7,7 +7,7 @@ production registry system.
[Stackdriver Monitoring](https://cloud.google.com/monitoring/docs/) is used to
instrument internal state within the Nomulus internal environment. This is
broadly called white-box monitoring. EPP, DNS, and WHOIS are instrumented. The
broadly called white-box monitoring. EPP, DNS, and RDAP are instrumented. The
metrics monitored are as follows:
* `/custom/dns/publish_domain_requests` -- A count of publish domain requests,
@@ -21,29 +21,27 @@ metrics monitored are as follows:
* `/custom/epp/processing_time` -- A [Distribution][distribution] representing
the processing time for EPP requests, described by command name, client id,
and return status code.
* `/custom/whois/requests` -- A count of WHOIS requests, described by command
* `/custom/rdap/requests` -- A count of RDAP requests, described by command
name, number of returned results, and return status code.
* `/custom/whois/processing_time` -- A [Distribution][distribution]
representing the processing time for WHOIS requests, described by command
* `/custom/rdap/processing_time` -- A [Distribution][distribution]
representing the processing time for RDAP requests, described by command
name, number of returned results, and return status code.
Follow the guide to [set up a Stackdriver
account](https://cloud.google.com/monitoring/accounts/guide) and associate it
with the GCP project containing the Nomulus App Engine app. Once the two have
been linked, monitoring will start automatically. For now, because the
Follow the guide to
[set up a Stackdriver account](https://cloud.google.com/monitoring/accounts/guide)
and associate it with the GCP project containing the Nomulus app. Once the two
have been linked, monitoring will start automatically. For now, because the
visualization of custom metrics in Stackdriver is embryonic, you can retrieve
and visualize the collected metrics with a script, as described in the guide on
[Reading Time
Series](https://cloud.google.com/monitoring/custom-metrics/reading-metrics) and
the [custom metric code
sample](https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/monitoring/api/v3/custom_metric.py).
[Reading Time Series](https://cloud.google.com/monitoring/custom-metrics/reading-metrics)
and the
[custom metric code sample](https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/monitoring/api/v3/custom_metric.py).
In addition to the included white-box monitoring, black-box monitoring should be
set up to exercise the functionality of the registry platform as a user would
see it. This monitoring should, for example, create a new domain name every few
minutes via EPP and then verify that the domain exists in DNS and WHOIS. For
now, no black-box monitoring implementation is provided with the Nomulus
platform.
minutes via EPP and then verify that the domain exists in DNS and RDAP. For now,
no black-box monitoring implementation is provided with the Nomulus platform.
## Updating cursors
@@ -80,11 +78,11 @@ scripts.
[RDE](https://newgtlds.icann.org/en/applicants/data-escrow) is a daily deposit
of the contents of the registry, sent to a third-party escrow provider. The
details are contained in Specification 2 of the [registry
agreement][registry-agreement].
details are contained in Specification 2 of the
[registry agreement][registry-agreement].
Nomulus provides [code to generate and send these
deposits](./operational-procedures/rde-deposits.md).
Nomulus provides
[code to generate and send these deposits](./operational-procedures/rde-deposits.md).
### Monthly registry activity and transaction reporting
@@ -92,8 +90,8 @@ ICANN requires monthly activity and transaction reporting. The details are
contained in Specification 3 of the [registry agreement][registry-agreement].
These reports are mostly generated by querying the Cloud SQL database. There is
currently a Google proprietary class to query DNS related activities that is
not included in the open source Nomulus release.
currently a Google proprietary class to query DNS related activities that is not
included in the open source Nomulus release.
### Zone File Access (ZFA)
@@ -102,27 +100,29 @@ information. The details are contained in part 2 of Specification 4 of the
[registry agreement][registry-agreement].
This information will come from the DNS server, rather than Nomulus itself, so
ZFA is not part of the Nomulus release.
ZFA is not directly part of the Nomulus release.
### Bulk Registration Data Access (BRDA)
BRDA is a weekly archive of the contents of the registry. The details are
contained in part 3 of Specification 4 of the [registry
agreement][registry-agreement].
contained in part 3 of Specification 4 of the
[registry agreement][registry-agreement].
ICANN uses sFTP to retrieve BRDA data from a server provided by the registry.
Nomulus provides [code to generate these
deposits](./operational-procedures/brda-deposits.md), but a separate sFTP server
must be configured, and the deposits must be moved onto the server for access by
ICANN.
Nomulus provides
[code to generate these deposits](./operational-procedures/brda-deposits.md),
but a separate sFTP server must be configured, and the deposits must be moved
onto the server for access by ICANN.
### Spec 11 reporting
[Spec 11][spec-11] reporting must be provided to ICANN as part of their
anti-abuse efforts. This is covered in Specification 11 of the
[registry agreement][registry-agreement], but the details are a little spotty.
The Nomulus release does not generate these reports.
Nomulus provides
[code](https://github.com/google/nomulus/blob/master/core/src/main/java/google/registry/beam/spec11/Spec11Pipeline.java)
to generate and send these reports, run on
[a schedule](https://github.com/google/nomulus/blob/master/core/src/main/java/google/registry/config/files/tasks/cloud-scheduler-tasks-production.xml#L257-L267)
[distribution]: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TypedValue#Distribution
[registry-agreement]: https://newgtlds.icann.org/sites/default/files/agreements/agreement-approved-09jan14-en.pdf

View File

@@ -28,8 +28,8 @@ The BRDA copy task reads the previous file and creates two files:
```
If you see an `xml.ghostryde` file but not the others, an error has occurred
during the process. If you see the files in the
{PROJECT-ID}-icann-brda bucket as well, the process has completed successfully.
during the process. If you see the files in the {PROJECT-ID}-icann-brda bucket
as well, the process has completed successfully.
Once the files have been created, they must be stored on an sFTP server from
which ICANN can pull the files. The Nomulus project does not provide this last
@@ -40,11 +40,11 @@ The cursor can be checked using the `nomulus pending_escrow` command.
## Generating BRDA deposits manually
* Get a list of "REAL" (as opposed to TEST) TLDs. Doublecheck that the command
output doesn't contain any TLDs for tests.
* Get a list of "REAL" (as opposed to TEST) TLDs. Double-check that the
command output doesn't contain any TLDs for tests.
```shell
$ registry-tool -e production list_tlds --fields=tldStr,tldType | grep REAL | awk '{print $1}' > realtlds.txt`
$ nomulus -e production list_tlds --fields=tldStr,tldType | grep REAL | awk '{print $1}' > realtlds.txt
```
* Generate .ryde and .sig files of TLDs specified for given date(s) in the

View File

@@ -0,0 +1,26 @@
# Creating or Modifying TLDs
Nomulus stores YAML representations of TLDs, in an effort to make sure that any
(potentially significant) modifications to TLDs go through source control and
code review. We recommend storing these TLD YAML representations in a separate
private repository so that changes can be verified by multiple people before
being merged
([here is an example TLD](https://github.com/google/nomulus/blob/master/core/src/test/resources/google/registry/tools/tld.yaml))
Creating and updating a TLD use the same process -- the only difference is
whether you're creating a TLD YAML file from scratch or modifying an existing
one.
Similar to [premium lists](premium-list-management.md) and
[reserved lists](reserved-list-management.md), we recommend modifying TLDs as a
part of an automated build process after the desired changes have been merged
into the TLD YAML files. The automated process should run:
```shell
nomulus -e {ENVIRONMENT} configure_tld --build_environment --input=path/to/my/file/tld.yaml
```
The `build_environment` flag signals that this is being run as part of an
automated build process and should ideally not be used manually. There is an
additional `--break_glass` argument that can be used in emergencies to modify
TLDs outside a normal build process.

View File

@@ -69,6 +69,18 @@ Perform this command? (y/N): y
Successfully saved premium list exampletld
```
### Note:
We recommend only updating premium lists manually in the case of emergencies.
Instead, we run the `update_premium_list` command (as well as `configure_tld`
and `update_reserved_list` commands) as part of the build process after a pull
request has been merged into the private source code repository that contains
the files. The `--build_environment` flag is used to signal that the command is
being run in one of those automated environments, and thus allowed to modify
production. Without that flag, commands against production will fail.
This is similar to the process for [updating TLDs](modifying-tlds.md).
If this premium list is already applied to a TLD, then changes will take up to
60 minutes to take effect (depending on how you've configured the relevant
caching interval; 60 minutes is the default).
@@ -80,16 +92,15 @@ premium list must first be applied to a TLD before it will take effect. You will
only need to do this when first creating a premium list; once it has been
applied, it stays applied, and updates to the list are effective automatically.
Note that each TLD can have no more than one premium list applied to it. To
apply a premium list to a TLD, run the `update_tld` command with the following
parameter:
apply a premium list to a TLD,
[update the TLD to set the premium list](modifying-tlds.md):
```shell
$ nomulus -e {ENVIRONMENT} update_tld exampletld --premium_list exampletld
Update Registry@exampletld
premiumList: null -> Key<?>(EntityGroupRoot("cross-tld")/PremiumList("exampletld"))
Perform this command? (y/N): y
Updated 1 entities.
...
pendingDeleteLength: "PT432000S"
premiumListName: "test"
pricingEngineClassName: "google.registry.model.pricing.StaticPremiumListPricingEngine"
...
```
## Checking which premium list is applied to a TLD
@@ -100,7 +111,7 @@ all other information about a TLD). It is used as follows:
```shell
$ nomulus -e {ENVIRONMENT} get_tld exampletld
[ ... snip output ... ]
premiumList=Key<?>(EntityGroupRoot("cross-tld")/PremiumList("exampletld"))
premiumListName: "test"
[ ... snip output ... ]
```
@@ -127,10 +138,10 @@ $ nomulus -e production check_domain {domain_name}
[ ... snip output ... ]
```
**Note that the list can be cached for up to 60 minutes, so the old value may
**Note that the list can be cached for up to 60 minutes, so the old value may
still be returned for a little while**. If it is urgent that the new pricing
changes be applied, and it's OK to potentially interrupt client connections,
then you can use the App Engine web console to kill instances of the `default`
then you can use the GCP web console to kill instances of the `frontend`
service, as the cache is per-instance. Once you've killed all the existing
instances (don't kill them all at once!), all of the newly spun up instances
will now be using the new values you've configured.
instances (don't kill them all at once!), all the newly spun up instances will
now be using the new values you've configured.

View File

@@ -16,9 +16,9 @@ phases:
3. [Report](https://github.com/google/nomulus/blob/master/java/google/registry/rde/RdeReportAction.java):
Transmit XML *report* file to ICANN via HTTPS.
Each phase happens with an App Engine task queue entry that retries on failure.
When each task succeeds, it automatically enqueues a task for the next phase in
the process. The staging files are stored in Google Cloud Storage indefinitely,
Each phase happens with a GCP task queue entry that retries on failure. When
each task succeeds, it automatically enqueues a task for the next phase in the
process. The staging files are stored in Google Cloud Storage indefinitely,
encrypted with the GhostRyDE container format.
Note that in order for the automated RDE processing to work correctly, you will
@@ -99,9 +99,10 @@ that no cooldown period is necessary.
## Listing deposits in Cloud Storage
You can list the files in Cloud Storage for a given TLD using the gcloud storage tool.
All files are stored in the {PROJECT-ID}-rde bucket, where {PROJECT-ID} is the
name of the App Engine project for the particular environment you are checking.
You can list the files in Cloud Storage for a given TLD using the gcloud storage
tool. All files are stored in the {PROJECT-ID}-rde bucket, where {PROJECT-ID} is
the name of the App Engine project for the particular environment you are
checking.
```shell
$ gcloud storage ls gs://{PROJECT-ID}-rde/zip_2015-05-16*
@@ -116,10 +117,12 @@ Under normal circumstances, RDE is launched by TldFanoutAction, configured in
cron.xml. If the App Engine's cron executor isn't working, you can spawn it
manually by visiting the following URL:
https://backend-dot-{PROJECT-ID}.appspot.com/_dr/task/rdeStaging
```
https://backend.mydomain.com/_dr/task/rdeStaging
```
That will spawn a staging task for each TLD under the backend module in that App
Engine project. You can also run the task from the cron tab of the GAE console.
That will spawn a staging task for each TLD under the backend module in that GCP
project. You can also run the task from the GCP Cloud Scheduler UI.
## Notification of upload problems
@@ -157,7 +160,7 @@ space the uploading at least two hours apart.
Note that this warning only applies when you (re)upload files directly to the
sFTP server. There's an RDE_UPLOAD_SFTP cursor that prevents the production
system from uploading twice in a two hour window, so when you let the production
system from uploading twice in a two-hour window, so when you let the production
job upload missing deposits, it will be safe. Therefore, one safe approach is to
reset the cursor, then kick off the production job manually.
@@ -172,7 +175,7 @@ $ gcloud storage cat gs://{PROJECT-ID}-rde/foo.ghostryde | nomulus -e production
## Identifying which phase of the process failed
Analyze the GAE logs on the backend module.
Analyze the GCP logs on the backend module.
If the rdeStaging task failed, then it's likely the files do not exist in cloud
storage.
@@ -308,8 +311,8 @@ sftp> put ${tld}_2015-05-16_full_S1_R0.sig
It would be convenient to have the following in your `~/.ssh/config` file and
store the SSH private key that you stored in `rde-ssh-client-private` as
`~/.ssh/id_rsa_rde` so that you can simply run `$ sftp rde` to connect to
the sFTP server.
`~/.ssh/id_rsa_rde` so that you can simply run `$ sftp rde` to connect to the
sFTP server.
```
Host rde

View File

@@ -5,36 +5,26 @@ for various reasons, usually because of potential abuse.
## Reserved list file format
Reserved lists are handled in a similar way to [premium
lists](./premium-list-management.md), except that instead of each label having
a price, it has a reservation type. The valid values for reservation types are:
Reserved lists are handled in a similar way to
[premium lists](./premium-list-management.md), except that instead of each label
having a price, it has a reservation type. The valid values for reservation
types are:
* **`NAMESERVER_RESTRICTED`** - Only nameservers included here can be set on a
domain with this label. If a label of this type exists on multiple
reserved lists that are applied to the same TLD, the set of allowed
nameservers for that label in that TLD is the intersection of all applicable
nameservers. Note that this restriction is orthogonal to the TLD-wide
nameserver restrictions that may be otherwise imposed. The ultimate set of
allowed nameservers for a certain domain is the intersection of per-domain
and TLD-wide allowed nameservers set. Furthermore, a TLD can be set in a
domain create restricted mode, in which case **only** domains that are
reserved with this type can be registered.
* **`ALLOWED_IN_SUNRISE`** - The label can be registered during the sunrise
period by a registrant with a valid claim but it is reserved thereafter.
* **`RESERVED_FOR_SPECIFIC_USE`** - The label is reserved for the use of a
specific registrant, and can only be registered by someone sending along the
allocation token at time of registration. This token is configured on an
`AllocationToken` entity with a matching `domainName`, and is sent by the
registrar using the [allocation token EPP
extension](https://tools.ietf.org/id/draft-ietf-regext-allocation-token-07.html).
registrar using the
[allocation token EPP extension](https://tools.ietf.org/id/draft-ietf-regext-allocation-token-07.html).
* **`RESERVED_FOR_ANCHOR_TENANT`** - Like `RESERVED_FOR_SPECIFIC_USE`, except
for an anchor tenant (i.e. a registrant participating in a [Qualified Launch
Program](https://newgtlds.icann.org/en/announcements-and-media/announcement-10apr14-en)),
for an anchor tenant (i.e. a registrant participating in a
[Qualified Launch Program](https://newgtlds.icann.org/en/announcements-and-media/announcement-10apr14-en)),
meaning that registrations can occur during sunrise ahead of GA, and must be
for a two year term.
* **`NAME_COLLISION`** - The label is reserved because it is on an [ICANN
collision
list](https://www.icann.org/resources/pages/name-collision-2013-12-06-en).
* **`NAME_COLLISION`** - The label is reserved because it is on an
[ICANN collision list](https://www.icann.org/resources/pages/name-collision-2013-12-06-en).
It may be registered during sunrise by a registrant with a valid claim but
is reserved thereafter. The `SERVER_HOLD` status is automatically applied
upon registration, which will prevent the domain name from ever resolving in
@@ -53,16 +43,13 @@ label is reserved due to name collision (with message "Cannot be delegated"). In
general `FULLY_BLOCKED` is by far the most widely used reservation type for
typical TLD use cases.
Here's an example of a small reserved list. Note that the
`NAMESERVER_RESTRICTED` label has a third entry, a colon separated list of
nameservers that the label can be delegated to:
Here's an example of a small reserved list:
```
reserveddomain,FULLY_BLOCKED
availableinga,ALLOWED_IN_SUNRISE
fourletterword,FULLY_BLOCKED
acmecorp,RESERVED_FOR_ANCHOR_TENANT
internaldomain,NAMESERVER_RESTRICTED,ns1.internal.tld:ns1.internal.tld
```
# Reserved list file name format
@@ -96,8 +83,8 @@ Updated 1 entities.
Note that `-i` is the input file containing the list. You can optionally specify
the name of the reserved list using `-n`, but when it's omitted as above the
list name is inferred from the name of the filename (minus the file extension).
For ease of tracking track of things, it is recommended to store all lists such
that the filename and list name are identical.
For ease of tracking, it is recommended to store all lists such that the
filename and list name are identical.
You're not done yet! After creating the reserved list you must then apply it to
one or more TLDs (see below) for it to actually be used.
@@ -127,22 +114,16 @@ reserved lists applied. The list of reserved labels for a TLD is the union of
all applied reserved lists, using the precedence rules described earlier when a
label appears in more than one list.
To add a reserved list to a TLD, run the `update_tld` command with the following
parameter:
To add a reserved list to a TLD, [update the TLD](modifying-tlds.md):
```shell
$ nomulus -e {ENVIRONMENT} update_tld exampletld \
--add_reserved_lists common_bad-words
Update Registry@exampletld
reservedLists: null -> [Key<?>(EntityGroupRoot("cross-tld")/ReservedList("common_bad-words"))]
Perform this command? (y/N): y
Updated 1 entities.
...
reservedListNames:
- "common_bad-words"
- "exampletld_specialized-reservations"
...
```
The `--add_reserved_lists` parameter can take a comma-delimited list of reserved
list names if you are applying multiple reserved lists to a TLD. There is also a
`--remove_reserved_lists` parameter that functions as you might expect.
Naming rules are enforced: reserved lists that start with `common_` can be
applied to any TLD (though they don't automatically apply to all TLDs), whereas
reserved lists that start with the name of a TLD can only be applied to the TLD
@@ -156,9 +137,11 @@ purposes here. It is used as follows:
```shell
$ nomulus -e {ENVIRONMENT} get_tld exampletld
[ ... snip output ... ]
reservedLists=[Key<?>(EntityGroupRoot("cross-tld")/ReservedList("common_bad-words"))]
[ ... snip output ... ]
...
reservedListNames:
- "common_bad-words"
- "exampletld_specialized-reservations"
...
```
## Listing all available reserved lists
@@ -184,10 +167,10 @@ $ nomulus -e production check_domain {domain_name}
[ ... snip output ... ]
```
**Note that the list can be cached for up to 60 minutes, so changes may not
take place immediately**. If it is urgent that the new changes be applied, and
it's OK to potentially interrupt client connections, then you can use the App
Engine web console to kill instances of the `default` service, as the cache is
**Note that the list can be cached for up to 60 minutes, so changes may not take
place immediately**. If it is urgent that the new changes be applied, and it's
OK to potentially interrupt client connections, then you can use the GCP web
console to kill instances of the `frontend` service, as the cache is
per-instance. Once you've killed all the existing instances (don't kill them all
at once!), all of the newly spun up instances will now be using the new values
at once!), all the newly spun up instances will now be using the new values
you've configured.

View File

@@ -1,119 +1,35 @@
# TLD security restrictions
Nomulus has several security features that allow registries to impose additional
restrictions on which domains are allowed on a TLD and what
registrant/nameservers they can have. The restrictions can be applied to an
entire TLD or on a per-domain basis. These restrictions are intended for use on
closed TLDs that need to allow external registrars, and prevent undesired domain
registrations or updates from occurring, e.g. if a registrar makes an error or
is compromised. For closed TLDs that do not need external registrars, a simpler
solution is to not grant any registrars access to the TLD.
restrictions on which domains are allowed on a TLD and what nameservers they can
have. The restrictions can be applied to an entire TLD or on a per-domain basis.
These restrictions are intended for use on closed TLDs that need to allow
external registrars, and prevent undesired domain registrations or updates from
occurring, e.g. if a registrar makes an error or is compromised. For closed TLDs
that do not need external registrars, a simpler solution is to not grant any
registrars access to the TLD.
This document outlines the various restrictions available, their use cases, and
how to apply them.
## TLD-wide nameserver/registrant restrictions
Nomulus allows registry administrators to set registrant contact and/or
nameserver restrictions on a TLD. This is typically desired for brand TLDs on
which all domains are either self-hosted or restricted to a small set of
webhosts.
Nomulus allows registry administrators to set nameserver restrictions on a TLD.
This is typically desired for brand TLDs on which all domains are either
self-hosted or restricted to a small set of webhosts.
To configure allowed nameservers on a TLD, use the
`--allowed_nameservers`, `--add_allowed_nameservers`, and
`--remove_allowed_nameservers` parameters on the `update_tld` command as
follows:
```shell
$ nomulus -e {ENVIRONMENT} update_tld --allowed_nameservers {NS1,NS2,...} {TLD}
```
Note that `--allowed_nameservers` can also be used with the `create_tld` command
when the TLD is initially created.
To set the allowed registrants, use the analogous `--allowed_registrants`,
`--add_allowed_registrants`, and `--remove_allowed_registrants` parameters:
```shell
$ nomulus -e {ENVIRONMENT} update_tld \
--allowed_registrants {CONTACTID1,CONTACTID2,...} {TLD}
```
When nameserver or registrant restrictions are set on a TLD, any domain mutation
flow under that TLD will verify that the supplied nameservers or registrants
are not empty and that they are a strict subset of the allowed nameservers and
registrants on the TLD. If no restrictions are set, domains can be created or
updated without nameservers, but registrant is still always required.
## Per-domain nameserver restrictions
Registries can also elect to impose per-domain nameserver restrictions. This
restriction is orthogonal to the TLD-wide nameserver restriction detailed above.
Any domain mutation must pass both validations (if applicable). In practice, it
is recommended to maintain consistency between the two types of lists by making
the per-domain allowed nameserver list a subset of the TLD-wide one, because any
nameservers that are not included in both lists are effectively disallowed.
The per-domain allowed nameserver lists are configured in [reserved
list](./reserved-list-management.md) entries with the reservation type
`NAMESERVER_RESTRICTED`. The final element in the entry is the colon-delimited
list of nameservers, e.g.:
To [configure allowed nameservers on a TLD](modifying-tlds.md), use the
`allowedFullyQualifiedHostNames` field in the TLD YAML file:
```
restrictedsld,NAMESERVER_RESTRICTED,ns1.mycompany.tld:ns2.mycompany.tld
addGracePeriodLength: "PT432000S"
allowedFullyQualifiedHostNames:
- "ns1.test.goog"
- "ns2.test.goog"
- "ns3.test.goog"
```
Note that multiple reserved lists can be applied to a TLD. If different reserved
lists contain nameserver restrictions for the same label, then the resulting
restriction set is the set intersection of all allowed nameserver lists for that
label.
## Domain create restriction on closed TLDs
Nomulus offers the ability to "lock-down" a TLD so that domain registration is
forbidden except for allow-listed domain names. This is achieved by setting the
"domain create restricted" option on the TLD using the `nomulus` tool. Domains
are allow-listed for registration by adding them to reserved lists with entries
of type `NAMESERVER_RESTRICTED`. Each domain will thus also need to have
explicitly allowed nameservers configured in its reserved list entry, per the
previous section.
To apply domain create restriction when creating/updating a TLD, use the
`--domain_create_restricted` parameter as follows:
```shell
$ nomulus -e {ENVIRONMENT} [create_tld | update_tld] \
--domain_create_restricted [true | false] {TLD}
```
Note that you do **not** have to set a TLD-wide allowed nameservers list with
this option, because it operates independently from the per-domain nameservers
restriction that `NAMESERVER_RESTRICTED` reservation imposes.
In addition to disabling registration of non-allow-listed domains, setting a TLD
as domain create restricted also applies the `SERVER_UPDATE_PROHIBITED` and
`SERVER_TRANSFER_PROHIBITED` statuses to domains upon creation. Any domains on a
domain create restricted TLD are therefore virtually immutable, and must be
unlocked by the registry operator before each change can be made. For more
information on these EPP statuses, see [RFC
5731](https://tools.ietf.org/html/rfc5731#section-2.3).
To unlock a locked domain so that a registrar can make changes, the registry
operator must remove the status using a `nomulus` tool command as follows:
```shell
$ nomulus -e {ENVIRONMENT} update_server_locks \
--remove SERVER_UPDATE_PROHIBITED,SERVER_TRANSFER_PROHIBITED \
--client {REGISTRAR_CLIENT_ID}
--n {DOMAIN}
```
Note that these statuses will be reapplied immediately after any transfer/update
so long as the TLD is still set to domain create restricted.
Since the domain create restricted facility is intended for use on closed TLDs,
validation/server lock does not happen in domain application and allocate flows.
Most closed TLDs do not have a sunrise period, so this is fine, but for the
unanticipated occasion that a sunrise period is necessary, it suffices to
manually ensure that all domains are correct immediately after entering general
availability, after which no additional disallowed changes can be made.
When nameserver restrictions are set on a TLD, any domain mutation flow under
that TLD will verify that the supplied nameservers are not empty and that they
are a strict subset of the allowed nameservers and registrants on the TLD. If no
restrictions are set, domains can be created or updated without nameservers.

View File

@@ -2,22 +2,22 @@
This doc covers procedures to configure, build and deploy the
[Netty](https://netty.io)-based proxy onto [Kubernetes](https://kubernetes.io)
clusters. [Google Kubernetes
Engine](https://cloud.google.com/kubernetes-engine/) is used as deployment
target. Any kubernetes cluster should in theory work, but the user needs to
change some dependencies on other GCP features such as Cloud KMS for key
management and Stackdriver for monitoring.
clusters.
[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) is used
as deployment target. Any kubernetes cluster should in theory work, but the user
needs to change some dependencies on other GCP features such as Cloud KMS for
key management and Stackdriver for monitoring.
## Overview
Nomulus runs on Google App Engine, which only supports HTTP(S) traffic. In order
to work with [EPP](https://tools.ietf.org/html/rfc5730.html) (TCP port 700) and
[WHOIS](https://tools.ietf.org/html/rfc3912) (TCP port 43), a proxy is needed to
relay traffic between clients and Nomulus and do protocol translation.
Nomulus runs on GKE, and natively only supports HTTP(S) traffic. In order to
work with [EPP](https://tools.ietf.org/html/rfc5730.html) (TCP port 700), a
proxy is needed to relay traffic between clients and Nomulus and do protocol
translation.
We provide a Netty-based proxy that runs as a standalone service (separate from
Nomulus) either on a VM or Kubernetes clusters. Deploying to kubernetes is
recommended as it provides automatic scaling and management for docker
Nomulus) either on a VM or Kubernetes clusters. Deploying to Kubernetes is
recommended as it provides automatic scaling and management for Docker
containers that alleviates much of the pain of running a production service.
The procedure described here can be used to set up a production environment, as
@@ -26,13 +26,13 @@ However, proper release management (cutting a release, rolling updates, canary
analysis, reliable rollback, etc) is not covered. The user is advised to use a
service like [Spinnaker](https://www.spinnaker.io/) for release management.
## Detailed Instruction
## Detailed Instructions
We use [`gcloud`](https://cloud.google.com/sdk/gcloud/) and
[`terraform`](https://terraform.io) to configure the proxy project on GCP and to create a GCS
bucket for storing the terraform state file. We use
[`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) to deploy
the proxy to the project. These instructions assume that all three tools are
[`terraform`](https://terraform.io) to configure the proxy project on GCP and to
create a GCS bucket for storing the terraform state file. We use
[`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) to deploy
the proxy to the project. These instructions assume that all three tools are
installed.
### Setup GCP project
@@ -41,9 +41,9 @@ There are three projects involved:
- Nomulus project: the project that hosts Nomulus.
- Proxy project: the project that hosts this proxy.
- GCR ([Google Container
Registry](https://cloud.google.com/container-registry/)) project: the
project from which the proxy pulls its Docker image.
- GCR
([Google Container Registry](https://cloud.google.com/container-registry/))
project: the project from which the proxy pulls its Docker image.
We recommend using the same project for Nomulus and the proxy, so that logs for
both are collected in the same place and easily accessible. If there are
@@ -64,16 +64,16 @@ $ gcloud storage buckets create gs://<bucket-name>/ --project <proxy-project>
### Obtain a domain and SSL certificate
The proxy exposes two endpoints, whois.\<yourdomain.tld\> and
epp.\<yourdomain.tld\>. The base domain \<yourdomain.tld\> needs to be obtained
from a registrar ([Google Domains](https://domains.google) for example). Nomulus
operators can also self-allocate a domain in the TLDs under management.
The proxy exposes one endpoint: `epp.<yourdomain.tld>`. The base domain
`<yourdomain.tld>` needs to be obtained from a registrar (RIP to
[Google Domains](https://domains.google/)). Nomulus operators can also
self-allocate a domain in the TLDs under management.
[EPP protocol over TCP](https://tools.ietf.org/html/rfc5734) requires a
client-authenticated SSL connection. The operator of the proxy needs to obtain
an SSL certificate for domain epp.\<yourdomain.tld\>. [Let's
Encrypt](https://letsencrypt.org) offers SSL certificate free of charge, but any
other CA can fill the role.
an SSL certificate for domain `epp.<yourdomain.tld>`.
[Let's Encrypt](https://letsencrypt.org) offers SSL certificate free of charge,
but any other CA can fill the role.
Concatenate the certificate and its private key into one file:
@@ -82,7 +82,7 @@ $ cat <certificate.pem> <private.key> > <combined_secret.pem>
```
The order between the certificate and the private key inside the combined file
does not matter. However, if the certificate file is chained, i. e. it contains
does not matter. However, if the certificate file is chained, i.e. it contains
not only the certificate for your domain, but also certificates from
intermediate CAs, these certificates must appear in order. The previous
certificate's issuer must be the next certificate's subject.
@@ -92,8 +92,9 @@ bucket will be created automatically by terraform.
### Setup proxy project
First setup the [Application Default
Credential](https://cloud.google.com/docs/authentication/production) locally:
First setup the
[Application Default Credential](https://cloud.google.com/docs/authentication/production)
locally:
```bash
$ gcloud auth application-default login
@@ -102,10 +103,9 @@ $ gcloud auth application-default login
Login with the account that has "Project Owner" role of all three projects
mentioned above.
Navigate to `proxy/terraform`, create a folder called
`envs`, and inside it, create a folder for the environment that proxy is
deployed to ("alpha" for example). Copy `example_config.tf` and `outputs.tf`
to the environment folder.
Navigate to `proxy/terraform`, create a folder called `envs`, and inside it,
create a folder for the environment that proxy is deployed to ("alpha" for
example). Copy `example_config.tf` and `outputs.tf` to the environment folder.
```bash
$ cd proxy/terraform
@@ -132,12 +132,12 @@ takes a couple of minutes.
### Setup Nomulus
After terraform completes, it outputs some information, among which is the
email address of the service account created for the proxy. This needs to be
added to the Nomulus configuration file so that Nomulus accepts traffic from the
proxy. Edit the following section in
`java/google/registry/config/files/nomulus-config-<env>.yaml` and redeploy
Nomulus:
After terraform completes, it outputs some information, among which is the email
address of the service account created for the proxy. This needs to be added to
the Nomulus configuration file so that Nomulus accepts traffic from the proxy.
Edit the following section in
`core/src/main/java/google/registry/config/files/nomulus-config-<env>.yaml` and
redeploy Nomulus:
```yaml
auth:
@@ -148,7 +148,7 @@ auth:
### Setup nameservers
The terraform output (run `terraform output` in the environment folder to show
it again) also shows the nameservers of the proxy domain (\<yourdomain.tld\>).
it again) also shows the nameservers of the proxy domain (`<yourdomain.tld>`).
Delegate this domain to these nameservers (through your registrar). If the
domain is self-allocated by Nomulus, run:
@@ -160,8 +160,8 @@ $ nomulus -e production update_domain <yourdomain.tld> \
### Setup named ports
Unfortunately, terraform currently cannot add named ports on the instance groups
of the GKE clusters it manages. [Named
ports](https://cloud.google.com/compute/docs/load-balancing/http/backend-service#named_ports)
of the GKE clusters it manages.
[Named ports](https://cloud.google.com/compute/docs/load-balancing/http/backend-service#named_ports)
are needed for the load balancer it sets up to route traffic to the proxy. To
set named ports, in the environment folder, do:
@@ -189,8 +189,9 @@ $ gcloud storage cp <combined_secret.pem.enc> gs://<your-certificate-bucket>
### Edit proxy config file
Proxy configuration files are at `java/google/registry/proxy/config/`. There is
a default config that provides most values needed to run the proxy, and several
Proxy configuration files are at
`proxy/src/main/java/google/registry/proxy/config/`. There is a default config
that provides most values needed to run the proxy, and several
environment-specific configs for proxy instances that communicate to different
Nomulus environments. The values specified in the environment-specific file
override those in the default file.
@@ -202,16 +203,33 @@ detailed descriptions on each field.
### Upload proxy docker image to GCR
Edit the `proxy_push` rule in `java/google/registry/proxy/BUILD` to add the GCR
project name and the image name to save to. Note that as currently set up, all
images pushed to GCR will be tagged `bazel` and the GKE deployment object loads
the image tagged as `bazel`. This is fine for testing, but for production one
should give images unique tags (also configured in the `proxy_push` rule).
The GKE deployment manifest is set up to pull the proxy docker image from
[Google Container Registry](https://cloud.google.com/container-registry/) (GCR).
Instead of using `docker` and `gcloud` to build and push images, respectively,
we provide `gradle` rules for the same tasks. To push an image, first use
[`docker-credential-gcr`](https://github.com/GoogleCloudPlatform/docker-credential-gcr)
to obtain necessary credentials. It is used by the Gradle to push the image.
After credentials are configured, verify that Gradle will use the proper
`gcpProject` for deployment in the main `build.gradle` file. We recommend using
the same project and image for proxies intended for different Nomulus
environments, this way one can deploy the same proxy image first to sandbox for
testing, and then to production.
To push to GCR, run:
```bash
$ bazel run java/google/registry/proxy:proxy_push
$ ./gradlew proxy:pushProxyImage
```
If the GCP project to host images (gcr project) is different from the project
that the proxy runs in (proxy project), give the service account "Storage Object
Viewer" role of the gcr project.
```bash
$ gcloud projects add-iam-policy-binding <image-project> \
--member serviceAccount:<service-account-email> \
--role roles/storage.objectViewer
```
### Deploy proxy
@@ -243,9 +261,9 @@ Repeat this for all three clusters.
### Afterwork
Remember to turn on [Stackdriver
Monitoring](https://cloud.google.com/monitoring/docs/) for the proxy project as
we use it to collect metrics from the proxy.
Remember to turn on
[Stackdriver Monitoring](https://cloud.google.com/monitoring/docs/) for the
proxy project as we use it to collect metrics from the proxy.
You are done! The proxy should be running now. You should store the private key
safely, or delete it as you now have the encrypted file shipped with the proxy.
@@ -278,14 +296,14 @@ in multiple zones to provide geographical redundancy.
### Create service account
The proxy will run with the credential of a [service
account](https://cloud.google.com/compute/docs/access/service-accounts). In
theory it can take advantage of [Application Default
Credentials](https://cloud.google.com/docs/authentication/production) and use
the service account that the GCE instance underpinning the GKE cluster uses, but
we recommend creating a separate service account. With a dedicated service
account, one can grant permissions only necessary to the proxy. To create a
service account:
The proxy will run with the credential of a
[service account](https://cloud.google.com/compute/docs/access/service-accounts).
In theory, it can take advantage of
[Application Default Credentials](https://cloud.google.com/docs/authentication/production)
and use the service account that the GCE instance underpinning the GKE cluster
uses, but we recommend creating a separate service account. With a dedicated
service account, one can grant permissions only necessary to the proxy. To
create a service account:
```bash
$ gcloud iam service-accounts create proxy-service-account \
@@ -303,10 +321,10 @@ $ gcloud iam service-accounts keys create proxy-key.json --iam-account \
A `proxy-key.json` file will be created inside the current working directory.
The service account email address needs to be added to the Nomulus
configuration file so that Nomulus accepts the OAuth tokens generated for this
service account. Add its value to
`java/google/registry/config/files/nomulus-config-<env>.yaml`:
The service account email address needs to be added to the Nomulus configuration
file so that Nomulus accepts the OAuth tokens generated for this service
account. Add its value to
`core/src/main/java/google/registry/config/files/nomulus-config-<env>.yaml`:
```yaml
auth:
@@ -325,27 +343,13 @@ $ gcloud projects add-iam-policy-binding <project-id> \
--role roles/logging.logWriter
```
### Obtain a domain and SSL certificate
A domain is needed (if you do not want to rely on IP addresses) for clients to
communicate to the proxy. Domains can be purchased from a domain registrar
([Google Domains](https://domains.google) for example). A Nomulus operator could
also consider self-allocating a domain under an owned TLD instead.
An SSL certificate is needed as [EPP over
TCP](https://tools.ietf.org/html/rfc5734) requires SSL. You can apply for an SSL
certificate for the domain name you intended to serve as EPP endpoint
(epp.nic.tld for example) for free from [Let's
Encrypt](https://letsencrypt.org). For now, you will need to manually renew your
certificate before it expires.
### Create keyring and encrypt the certificate/private key
The proxy needs access to both the private key and the certificate. Do *not*
package them directly with the proxy. Instead, use [Cloud
KMS](https://cloud.google.com/kms/) to encrypt them, ship the encrypted file
with the proxy, and call Cloud KMS to decrypt them on the fly. (If you want to
use another keyring solution, you will have to modify the proxy and implement
package them directly with the proxy. Instead, use
[Cloud KMS](https://cloud.google.com/kms/) to encrypt them, ship the encrypted
file with the proxy, and call Cloud KMS to decrypt them on the fly. (If you want
to use another keyring solution, you will have to modify the proxy and implement
yours)
Concatenate the private key file with the certificate. It does not matter which
@@ -378,7 +382,7 @@ A file named `ssl-cert-key.pem.enc` will be created. Upload it to a GCS bucket
in the proxy project. To create a bucket and upload the file:
```bash
$ gcloud storage buckets create gs://<bucket-name> --project <proxy-project>
$ gcloud storage buckets create gs://<bucket-name> --project <proxy-project>
$ gcloud storage cp ssl-cert-key.pem.enc gs://<bucket-name>
```
@@ -402,8 +406,9 @@ $ gcloud storage buckets add-iam-policy-binding gs://<bucket-name> \
### Proxy configuration
Proxy configuration files are at `java/google/registry/proxy/config/`. There is
a default config that provides most values needed to run the proxy, and several
Proxy configuration files are at
`proxy/src/main/java/google/registry/proxy/config/`. There is a default config
that provides most values needed to run the proxy, and several
environment-specific configs for proxy instances that communicate to different
Nomulus environments. The values specified in the environment-specific file
override those in the default file.
@@ -416,12 +421,12 @@ for detailed descriptions on each field.
### Setup Stackdriver for the project
The proxy streams metrics to
[Stackdriver](https://cloud.google.com/stackdriver/). Refer to [Stackdriver
Monitoring](https://cloud.google.com/monitoring/docs/) documentation on how to
enable monitoring on the GCP project.
[Stackdriver](https://cloud.google.com/stackdriver/). Refer to
[Stackdriver Monitoring](https://cloud.google.com/monitoring/docs/)
documentation on how to enable monitoring on the GCP project.
The proxy service account needs to have ["Monitoring Metric
Writer"](https://cloud.google.com/monitoring/access-control#predefined_roles)
The proxy service account needs to have
["Monitoring Metric Writer"](https://cloud.google.com/monitoring/access-control#predefined_roles)
role in order to stream metrics to Stackdriver:
```bash
@@ -464,44 +469,6 @@ tag for all clusters.
Repeat this for all the zones you want to create clusters in.
### Upload proxy docker image to GCR
The GKE deployment manifest is set up to pull the proxy docker image from
[Google Container Registry](https://cloud.google.com/container-registry/) (GCR).
Instead of using `docker` and `gcloud` to build and push images, respectively,
we provide `bazel` rules for the same tasks. To push an image, first use
[`docker-credential-gcr`](https://github.com/GoogleCloudPlatform/docker-credential-gcr)
to obtain necessary credentials. It is used by the [bazel container_push
rules](https://github.com/bazelbuild/rules_docker#authentication) to push the
image.
After credentials are configured, edit the `proxy_push` rule in
`java/google/registry/proxy/BUILD` to add the GCP project name and the image
name to save to. We recommend using the same project and image for proxies
intended for different Nomulus environments, this way one can deploy the same
proxy image first to sandbox for testing, and then to production.
Also note that as currently set up, all images pushed to GCR will be tagged
`bazel` and the GKE deployment object loads the image tagged as `bazel`. This is
fine for testing, but for production one should give images unique tags (also
configured in the `proxy_push` rule).
To push to GCR, run:
```bash
$ bazel run java/google/registry/proxy:proxy_push
```
If the GCP project to host images (gcr project) is different from the project
that the proxy runs in (proxy project), give the service account "Storage Object
Viewer" role of the gcr project.
```bash
$ gcloud projects add-iam-policy-binding <image-project> \
--member serviceAccount:<service-account-email> \
--role roles/storage.objectViewer
```
### Upload proxy service account key to GKE cluster
The kubernetes pods (containers) are configured to read the proxy service
@@ -555,22 +522,22 @@ Repeat the same step for all clusters you want to deploy to.
The proxies running on GKE clusters need to be exposed to the outside. Do not
use Kubernetes
[`LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer).
It will create a GCP [Network Load
Balancer](https://cloud.google.com/compute/docs/load-balancing/network/), which
has several problems:
It will create a GCP
[Network Load Balancer](https://cloud.google.com/compute/docs/load-balancing/network/),
which has several problems:
- This load balancer does not terminate TCP connections. It simply acts as an
edge router that forwards IP packets to a "healthy" node in the cluster. As
such, it does not support IPv6, because GCE instances themselves are
currently IPv4 only.
- IP packets that arrived on the node may be routed to another node for
- IP packets that arrive at the node may be routed to another node for
reasons of capacity and availability. In doing so it will
[SNAT](https://en.wikipedia.org/wiki/Network_address_translation#SNAT) the
packet, therefore losing the source IP information that the proxy needs. The
proxy uses WHOIS source IP address to cap QPS and passes EPP source IP to
Nomulus for validation. Note that a TCP terminating load balancer also has
this problem as the source IP becomes that of the load balancer, but it can
be addressed in other ways (explained later). See
proxy uses source IP address to cap QPS and passes EPP source IP to Nomulus
for validation. Note that a TCP terminating load balancer also has this
problem as the source IP becomes that of the load balancer, but it can be
addressed in other ways (explained later). See
[here](https://kubernetes.io/docs/tutorials/services/source-ip/) for more
details on how Kubernetes route traffic and translate source IPs inside the
cluster.
@@ -581,8 +548,8 @@ has several problems:
Instead, we split the task of exposing the proxy to the Internet into two tasks,
first to expose it within the cluster, then to expose the cluster to the outside
through a [TCP Proxy Load
Balancer](https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy).
through a
[TCP Proxy Load Balancer](https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy).
This load balancer terminates TCP connections and allows for the use of a single
anycast IP address (IPv4 and IPv6) to reach any clusters connected to its
backend (it chooses a particular cluster based on geographical proximity). From
@@ -611,8 +578,8 @@ $ kubectl create -f \
proxy/kubernetes/proxy-service.yaml
```
This service object will open up port 30000 (health check), 30001 (WHOIS) and
30002 (EPP) on the nodes, routing to the same ports inside a pod.
This service object will open up port 30000 (health check) and 30002 (EPP) on
the nodes, routing to the same ports inside a pod.
Repeat this for all clusters.
@@ -641,7 +608,7 @@ Then set the named ports:
```bash
$ gcloud compute instance-groups set-named-ports <instance-group> \
--named-ports whois:30001,epp:30002 --zone <zone>
--named-ports epp:30002 --zone <zone>
```
Repeat this for each instance group (cluster).
@@ -689,7 +656,7 @@ routed to the corresponding port on a proxy pod. The backend service codifies
which ports on the node's clusters should receive traffic from the load
balancer.
Create one backend service for EPP and one for WHOIS:
Create a backend service for EPP:
```bash
# EPP backend
@@ -697,28 +664,18 @@ $ gcloud compute backend-services create proxy-epp-loadbalancer \
--global --protocol TCP --health-checks proxy-health --timeout 1h \
--port-name epp
# WHOIS backend
$ gcloud compute backend-services create proxy-whois-loadbalancer \
--global --protocol TCP --health-checks proxy-health --timeout 1h \
--port-name whois
```
These two backend services route packets to the epp named port and whois named
port on any instance group attached to them, respectively.
This backend service routes packets to the EPP named port on any instance group
attached to it.
Then add (attach) instance groups that the proxies run on to each backend
service:
Then add (attach) the instance groups that the proxies run on to the backend service:
```bash
# EPP backend
$ gcloud compute backend-services add-backend proxy-epp-loadbalancer \
--global --instance-group <instance-group> --instance-group-zone <zone> \
--balancing-mode UTILIZATION --max-utilization 0.8
# WHOIS backend
$ gcloud compute backend-services add-backend proxy-whois-loadbalancer \
--global --instance-group <instance-group> --instance-group-zone <zone> \
--balancing-mode UTILIZATION --max-utilization 0.8
```
Repeat this for each instance group.
@@ -747,10 +704,10 @@ $ gcloud compute addresses describe proxy-ipv4 --global
$ gcloud compute addresses describe proxy-ipv6 --global
```
Set these IP addresses as the A/AAAA records for both epp.<nic.tld> and
whois.<nic.tld> where <nic.tld> is the domain that was obtained earlier. (If you
use [Cloud DNS](https://cloud.google.com/dns/) as your DNS provider, this step
can also be performed by `gcloud`)
Set these IP addresses as the A/AAAA records for epp.<nic.tld> where <nic.tld> is
the domain that was obtained earlier. (If you use
[Cloud DNS](https://cloud.google.com/dns/) as your DNS provider, this step can
also be performed by `gcloud`)
#### Create load balancer frontend
@@ -761,21 +718,16 @@ First create a TCP proxy (yes, it is confusing, this GCP resource is called
"proxy" as well) which is a TCP termination point. Outside connections terminate
on a TCP proxy, which establishes its own connection to the backend services
defined above. As such, the source IP address from the outside is lost. But the
TCP proxy can add the [PROXY protocol
header](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) at the
beginning of the connection to the backend. The proxy running on the backend can
parse the header and obtain the original source IP address of a request.
Make one for each protocol (EPP and WHOIS).
TCP proxy can add the
[PROXY protocol header](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt)
at the beginning of the connection to the backend. The proxy running on the
backend can parse the header and obtain the original source IP address of a
request.
```bash
# EPP
$ gcloud compute target-tcp-proxies create proxy-epp-proxy \
--backend-service proxy-epp-loadbalancer --proxy-header PROXY_V1
# WHOIS
$ gcloud compute target-tcp-proxies create proxy-whois-proxy \
--backend-service proxy-whois-loadbalancer --proxy-header PROXY_V1
```
Note the use of the `--proxy-header` flag, which turns on the PROXY protocol
@@ -785,47 +737,36 @@ Next, create the forwarding rule that route outside traffic to a given IP to the
TCP proxy just created:
```bash
$ gcloud compute forwarding-rules create proxy-whois-ipv4 \
--global --target-tcp-proxy proxy-whois-proxy \
--address proxy-ipv4 --ports 43
$ gcloud compute forwarding-rules create proxy-epp-ipv4 \
--global --target-tcp-proxy proxy-epp-proxy \
--address proxy-ipv4 --ports 700
```
The above command sets up a forwarding rule that routes traffic destined to the
static IPv4 address reserved earlier, on port 43 (actual port for WHOIS), to the
TCP proxy that connects to the whois backend service.
static IPv4 address reserved earlier, on port 700 (actual port for EPP), to the
TCP proxy that connects to the EPP backend service.
Repeat the above command another three times, set up IPv6 forwarding for WHOIS,
and IPv4/IPv6 forwarding for EPP.
Repeat the above command to set up IPv6 forwarding for EPP.
## Additional steps
### Check if it all works
At this point the proxy should be working and reachable from the Internet. Try
if a whois request to it is successful:
```bash
whois -h whois.<nic.tld> something
```
One can also try to contact the EPP endpoint with an EPP client.
### Check logs and metrics
The proxy saves logs to [Stackdriver
Logging](https://cloud.google.com/logging/), which is the same place that
Nomulus saves its logs to. On GCP console, navigate to Logging - Logs - GKE
Container - <cluster name> - default. Do not choose "All namespace_id" as it
includes logs from the Kubernetes system itself and can be quite overwhelming.
The proxy saves logs to
[Stackdriver Logging](https://cloud.google.com/logging/), which is the same
place that Nomulus saves its logs to. On GCP console, navigate to Logging -
Logs - GKE Container - <cluster name> - default. Do not choose "All
namespace_id" as it includes logs from the Kubernetes system itself and can be
quite overwhelming.
Metrics are stored in [Stackdriver
Monitoring](https://cloud.google.com/monitoring/docs/). To view the metrics, go
to Stackdriver [console](https://app.google.stackdriver.com) (also accessible
from GCE console under Monitoring), navigate to Resources - Metrics Explorer.
Choose resource type "GKE Container" and search for metrics with name "/proxy/"
in it. Currently available metrics include total connection counts, active
connection count, request/response count, request/response size, round-trip
latency and quota rejection count.
Metrics are stored in
[Stackdriver Monitoring](https://cloud.google.com/monitoring/docs/). To view the
metrics, go to Stackdriver [console](https://app.google.stackdriver.com) (also
accessible from GCE console under Monitoring), navigate to Resources - Metrics
Explorer. Choose resource type "GKE Container" and search for metrics with name
"/proxy/" in it. Currently available metrics include total connection counts,
active connection count, request/response count, request/response size,
round-trip latency and quota rejection count.
### Cleanup sensitive files

View File

@@ -1,18 +1,18 @@
# RDAP user's guide
[RDAP](https://www.icann.org/rdap) is a JSON REST protocol, served over HTTPS,
for retrieving registry information. It returns data similar to the WHOIS
service, but in a JSON-structured format. This document describes the Nomulus
system's support for the RDAP protocol.
for retrieving registry information. It returns data similar to the
previously-existing WHOIS service, but in a JSON-structured format. This
document describes the Nomulus system's support for the RDAP protocol.
## Quick example <a id="quick_example"></a>
RDAP information is available via regular Web queries. For example, if your App
Engine project ID is `project-id`, and `tld` is a TLD managed by that instance
of Nomulus, enter the following in a Web browser:
RDAP information is available via regular Web queries. For example, if your base
domain is `mydomain.com`, and `tld` is a TLD managed by that instance of
Nomulus, enter the following in a Web browser:
```
https://project-id.appspot.com/rdap/domains?name=*.tld
https://pubapi.mydomain.com/rdap/domains?name=*.tld
```
You should get back a long string of apparent JSON gobbledygook, listing the
@@ -24,34 +24,21 @@ the response in an expandable tree format.
## Introduction to RDAP <a id="introduction"></a>
RDAP is a next-generation protocol for dissemination of registry data. It is
eventually intended to replace the WHOIS protocol. RDAP was defined in 2015 in a
series of RFCs:
RDAP is a next-generation protocol for dissemination of registry data. It has
replaced the WHOIS protocol. RDAP was defined as STD 95 as a series of RFCs:
* [RFC 7480: HTTP Usage in the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc7480)
* [RFC 7480: HTTP Usage in the Registration Data Access Protocol (RDAP)](https://tools.ietf.org/html/rfc7480)
* [RFC 7481: Security Services for the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc7481)
* [RFC 7482: Registration Data Access Protocol (RDAP) Query
Format](https://tools.ietf.org/html/rfc7482)
* [RFC 7483: JSON Responses for the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc7483)
* [RFC 7484: Finding the Authoritative Registration Data (RDAP)
Service](https://tools.ietf.org/html/rfc7484)
* [RFC 9082: Registration Data Access Protocol (RDAP) Query Format](https://tools.ietf.org/html/rfc9082)
* [RFC 9083: JSON Responses for the Registration Data Access Protocol (RDAP)](https://tools.ietf.org/html/rfc9083)
* [RFC 9224: Finding the Authoritative Registration Data (RDAP) Service](https://tools.ietf.org/html/rfc9224)
Using RDAP, users can send in standard HTTP requests with specific URLs (such as
`.../rdap/domain/example.com` to get information about the domain `example.com`,
or `.../rdap/domains?name=ex*.com` to get information about domains beginning
with `ex` and having the TLD `.com`), and receive back a JSON response
containing the requested data. The data is more or less the same information
that WHOIS provides today, but formatted in a more standardized and
machine-readable manner.
ICANN is sponsoring a one-year [RDAP Pilot
Program](https://www.icann.org/news/announcement-2017-09-05-en), allowing
registries to implement RDAP and, if desired, make modifications to the protocol
to support desired extra features. Nomulus is participating in this program. The
experimental extra features supported by Nomulus are described later.
containing the requested data.
## Nomulus RDAP request endpoints <a id="endpoints"></a>
@@ -60,7 +47,7 @@ the usual App Engine server name. For example, if the App Engine project ID is
`project-id`, the full path for a domain lookup of domain iam.soy would be:
```
https://project-id.appspot.com/rdap/domain/iam.soy
https://pubapi.mydomain.com/rdap/domain/iam.soy
```
The search endpoints (those with a query string) can return more than one match.
@@ -92,8 +79,6 @@ used; Nomulus' wildcard rules are described below.
A maximum of 100 domains will be returned in response to a single query. If more
than one domain is returned, only data about the domain itself is included. If a
single domain is returned, associated nameservers and contacts are also returned
(except that requests not authenticated as associated with the registrar of the
domain will not see contact information).
#### Search by domain name
@@ -160,34 +145,28 @@ Wildcards are not supported for IP address lookup.
### /rdap/entity/ <a id="entity"></a>
Look up a single entity by name. The entity ID is specified at the end of the
path. Two types of entities can be looked up: registrars (looked up by IANA
registrar ID) and contacts (lookup up by ROID). Registrar contacts are also
returned in results as entities, but cannot be looked up by themselves; they
only appear as part of information about a registrar.
path. Because the registry does not store contact data, only registrar entities
may be looked up (by IANA registrar ID). Registrar contacts are also returned in
results as entities, but cannot be looked up by themselves; they only appear as
part of information about a registrar.
```
/rdap/entity/registrar-id
/rdap/entity/ROID
```
### /rdap/entities? <a id="entities"></a>
Search for one or more entities (registrars or contacts). The RDAP specification
supports two kinds of searches: by full name or by handle. In either case,
requests not authenticated as associated with the registrar owning a contact
will not see personal data (name, address, email, phone, etc.) for the contact.
The visibility of registrar information, including registrar contacts, is
determined by the registrar's chosen WHOIS visibility settings.
Search for one or more registrars. The RDAP specification supports two kinds of
searches: by full name or by handle. The visibility of registrar information,
including registrar contacts, is determined by the registrar's chosen WHOIS/RDAP
visibility settings.
#### Search by full name
Entity searches by full name use the `fn` query parameter. Results can include
contacts with a matching name, registrars with a matching registrar name, or
both. For contacts, the name used is the internationalized postal info name, if
present, or the localized postal info name otherwise.
Entity searches by full name use the `fn` query parameter, matching the
registrar name.
```
/rdap/entities?fn=Joseph%20Smith
/rdap/entities?fn=tucows
```
@@ -195,27 +174,16 @@ A trailing wildcard is allowed, but at least two characters must precede the
wildcard.
```
/rdap/entities?fn=Bobby%20Joe*
/rdap/entities?fn=tu*
```
#### Search by handle
Entity searches by handle use the `handle` query parameter. Results can include
contacts with a matching ROID, registrars with a matching IANA registrar number,
or both.
Entity searches by handle use the `handle` query parameter. Results will include
registrars with a matching IANA registrar number.
```
/rdap/entities?handle=12
/rdap/entities?handle=ROID-1234
```
A trailing wildcard is allowed, with at least two characters preceding it.
However, wildcard matching is only performed for contacts. Registrars will never
be returned for a wildcard entity search.
```
/rdap/entities?handle=ROID-12*
```
### /rdap/help/ <a id="help"></a>
@@ -267,34 +235,23 @@ the RDAP endpoints.
The RDAP RFCs do not include support for authentication or access controls. We
have implemented an experimental version that allows for authenticated access to
sensitive data such as contact names and addresses (a longtime concern with
WHOIS). We do this by leveraging the existing authentication/authorization
functionality of Nomulus' registrar console. Requests which can be authenticated
as coming from a specific registrar have access to all information about that
registrar's contact. Requests authenticated as coming from administrators of the
App Engine project have access to all contact information. In other cases, the
sensitive data will be hidden, and only the contact ROIDs and roles will be
displayed.
The registrar console uses the App Engine Users API to authenticate users. When
a request comes in, App Engine attempts to authenticate the user's email
address. If authentication is successful, the email address is then checked
against all registrar contacts in the system. If Nomulus is able to find a
matching registrar contact which has the `allow_console_access` permission, the
request is authorized for the associated registrar.
sensitive data such as information about deleted domains. We do this by
leveraging the existing authentication/authorization functionality of Nomulus'
registrar console. Requests which can be authenticated as coming from a specific
registrar have access to all information about that registrar's contact.
Requests authenticated as coming from administrators of the GCP project have
access to all information. In other cases, the sensitive data will be hidden.
RDAP uses the same logic, but the registrar association is used only to
determine whether the request can see sensitive contact information (and deleted
items, as described in the section about the `includeDeleted` parameter).
Unauthenticated requests can still retrieve data, but that data will not be
visible.
determine whether the request can see deleted items. Unauthenticated requests
can still retrieve data, but deleted items will not be visible.
To use RDAP in an authenticated fashion, first set up your email address for use
in the registrar console, as described elsewhere. Then check that you have
access to the console by loading the page:
```
https://project-id.appspot.com/registrar
https://mydomain.com/console
```
If you can see the registrar console, you are logged in correctly. Then change
@@ -303,9 +260,9 @@ see all data for your associated registrar.
### `registrar` parameter <a id="registrar_parameter"></a>
Ordinarily, all matching domains, hosts and contacts are included in the
returned result set. A request can specify that only items owned by a specific
registrar be included, by adding an extra parameter:
Ordinarily, all matching domains and hosts are included in the returned result
set. A request can specify that only items owned by a specific registrar be
included, by adding an extra parameter:
```
/rdap/domains?name=*.tld&registrar=registrar-client-string
@@ -317,9 +274,9 @@ to be shown that otherwise would not.
### `includeDeleted` parameter <a id="includedeleted_parameter"></a>
Ordinarily, deleted domains, hosts and contacts are not included in search
results. Authorized requests can specify that deleted items be included, by
adding an extra parameter:
Ordinarily, deleted domains and hosts are not included in search results.
Authorized requests can specify that deleted items be included, by adding an
extra parameter:
```
/rdap/domains?name=*.tld&includeDeleted=true
@@ -341,14 +298,6 @@ formatted version can be requested by adding an extra parameter:
The result is still valid JSON, but with extra whitespace added to align the
data on the page.
### `subtype` parameter <a id="subtype_parameter"></a>
The subtype parameter is used only for entity searches, to select whether the
results should include contacts, registrars or both. If specified, the subtype
should be 'all', 'contacts' or 'registrars'. Setting the subtype to 'all'
duplicates the normal behavior of returning both. Setting it to 'contacts' or
'registrars' causes an entity search to return only contacts or only registrars.
### Next page links <a id="next_page_links"></a>
The number of results returned in a domain, nameserver or entity search is
@@ -384,7 +333,7 @@ truncated.
{
"type" : "application/rdap+json",
"href" :
"https://ex.com/rdap/domains?name=abc*.tld&cursor=a5927CDb902wE=",
"https://pubapi.mydomain.com/rdap/domains?name=abc*.tld&cursor=a5927CDb902wE=",
"rel" : "next"
}
],
@@ -392,9 +341,3 @@ truncated.
},
...
```
### Additional features
We anticipate adding additional features during the pilot program, such as the
ability to page through search results. We will update the documentation when
these features are implemented.

Some files were not shown because too many files have changed in this diff Show More