1
0
mirror of https://github.com/google/nomulus synced 2026-02-27 15:24:53 +00:00

Compare commits

..

3 Commits

Author SHA1 Message Date
gbrodman
c52983fb61 Update some Nomulus documentation (#2970)
This doesn't update everything -- it leaves out some of the more
complicated changes (architecture, code-structure, configuration,
install, and proxy-setup). Those will require more complete rewrites, so
I'm punting them to a future PR.
2026-02-26 19:05:22 +00:00
Weimin Yu
8a3ab00e58 Apply Fee tag normalization in production (#2968)
Feature verified in Sandbox.
2026-02-25 20:02:37 +00:00
Pavlo Tkach
49df9c325a Update angular @21 (#2965) 2026-02-24 20:08:27 +00:00
71 changed files with 4903 additions and 8515 deletions

View File

@@ -15,7 +15,7 @@
"prefix": "app",
"architect": {
"build": {
"builder": "@angular-devkit/build-angular:application",
"builder": "@angular/build:application",
"options": {
"outputPath": {
"base": "staged/dist/",
@@ -112,7 +112,7 @@
"defaultConfiguration": "production"
},
"serve": {
"builder": "@angular-devkit/build-angular:dev-server",
"builder": "@angular/build:dev-server",
"configurations": {
"production": {
"buildTarget": "console-webapp:build:production"
@@ -136,16 +136,18 @@
"defaultConfiguration": "development"
},
"extract-i18n": {
"builder": "@angular-devkit/build-angular:extract-i18n",
"builder": "@angular/build:extract-i18n",
"options": {
"buildTarget": "console-webapp:build"
}
},
"test": {
"builder": "@angular-devkit/build-angular:karma",
"builder": "@angular/build:karma",
"options": {
"main": "src/test.ts",
"polyfills": "src/polyfills.ts",
"polyfills": [
"src/polyfills.ts"
],
"tsConfig": "tsconfig.spec.json",
"karmaConfig": "karma.conf.js",
"inlineStyleLanguage": "scss",
@@ -183,5 +185,31 @@
"schematicCollections": [
"@angular-eslint/schematics"
]
},
"schematics": {
"@schematics/angular:component": {
"type": "component"
},
"@schematics/angular:directive": {
"type": "directive"
},
"@schematics/angular:service": {
"type": "service"
},
"@schematics/angular:guard": {
"typeSeparator": "."
},
"@schematics/angular:interceptor": {
"typeSeparator": "."
},
"@schematics/angular:module": {
"typeSeparator": "."
},
"@schematics/angular:pipe": {
"typeSeparator": "."
},
"@schematics/angular:resolver": {
"typeSeparator": "."
}
}
}

View File

@@ -1,7 +1,7 @@
{
"/console-api":
{
"target": "http://[::1]:8080",
"target": "http://localhost:8080",
"secure": false,
"logLevel": "debug",
"changeOrigin": true

View File

@@ -21,7 +21,7 @@ module.exports = function (config) {
require('karma-chrome-launcher'),
require('karma-jasmine-html-reporter'),
require('karma-coverage'),
require('@angular-devkit/build-angular/plugins/karma')
],
client: {
jasmine: {

File diff suppressed because it is too large Load Diff

View File

@@ -16,29 +16,29 @@
},
"private": true,
"dependencies": {
"@angular/animations": "^19.1.4",
"@angular/cdk": "^19.1.2",
"@angular/common": "^19.1.4",
"@angular/compiler": "^19.1.4",
"@angular/core": "^19.1.4",
"@angular/forms": "^19.1.4",
"@angular/material": "^19.1.2",
"@angular/platform-browser": "^19.1.4",
"@angular/platform-browser-dynamic": "^19.1.4",
"@angular/router": "^19.1.4",
"@angular/animations": "^21.1.5",
"@angular/cdk": "^21.1.5",
"@angular/common": "^21.1.5",
"@angular/compiler": "^21.1.5",
"@angular/core": "^21.1.5",
"@angular/forms": "^21.1.5",
"@angular/material": "^21.1.5",
"@angular/platform-browser": "^21.1.5",
"@angular/platform-browser-dynamic": "^21.1.5",
"@angular/router": "^21.1.5",
"rxjs": "~7.5.0",
"tslib": "^2.3.0",
"zone.js": "~0.15.0"
},
"devDependencies": {
"@angular-devkit/build-angular": "^19.1.5",
"@angular-eslint/builder": "19.0.2",
"@angular-eslint/eslint-plugin": "19.0.2",
"@angular-eslint/eslint-plugin-template": "19.0.2",
"@angular-eslint/schematics": "19.0.2",
"@angular-eslint/template-parser": "19.0.2",
"@angular/cli": "~19.1.5",
"@angular/compiler-cli": "^19.1.4",
"@angular/build": "^21.1.4",
"@angular/cli": "~21.1.4",
"@angular/compiler-cli": "^21.1.5",
"@types/jasmine": "~4.0.0",
"@types/node": "^18.19.74",
"@typescript-eslint/eslint-plugin": "^7.2.0",
@@ -52,6 +52,6 @@
"karma-jasmine": "~5.1.0",
"karma-jasmine-html-reporter": "~2.0.0",
"prettier": "2.8.7",
"typescript": "^5.7.3"
"typescript": "^5.9.3"
}
}
}

View File

@@ -1,10 +1,9 @@
<div class="console-app mat-typography">
<app-header (toggleNavOpen)="toggleSidenav()"></app-header>
<div class="console-app__global-spinner">
<mat-progress-bar
mode="indeterminate"
*ngIf="globalLoader.isLoading"
></mat-progress-bar>
@if (globalLoader.isLoading) {
<mat-progress-bar mode="indeterminate"></mat-progress-bar>
}
</div>
<mat-sidenav-container class="console-app__container">
<mat-sidenav-content class="console-app__content-wrapper">

View File

@@ -143,13 +143,14 @@
<ng-container matColumnDef="domainName">
<mat-header-cell *matHeaderCellDef>Domain Name</mat-header-cell>
<mat-cell *matCellDef="let element">
@if (getOperationMessage(element.domainName)) {
<mat-icon
*ngIf="getOperationMessage(element.domainName)"
[matTooltip]="getOperationMessage(element.domainName)"
matTooltipPosition="above"
class="primary-text"
>info</mat-icon
>
}
<span>{{ element.domainName }}</span>
</mat-cell>
</ng-container>
@@ -209,9 +210,9 @@
<mat-row *matRowDef="let row; columns: displayedColumns"></mat-row>
<!-- Row shown when there is no matching data. -->
<mat-row *matNoDataRow>
<mat-cell colspan="6">No domains found</mat-cell>
</mat-row>
<tr class="mat-row" *matNoDataRow>
<td class="mat-cell" colspan="6">No domains found</td>
</tr>
</mat-table>
<mat-paginator
[length]="totalResults"

View File

@@ -33,6 +33,7 @@ import {
MatDialogRef,
} from '@angular/material/dialog';
import { RESTRICTED_ELEMENTS } from '../shared/directives/userLevelVisiblity.directive';
import { CdkColumnDef } from '@angular/cdk/table';
interface DomainResponse {
message: string;
@@ -114,6 +115,7 @@ export class ReasonDialogComponent {
templateUrl: './domainList.component.html',
styleUrls: ['./domainList.component.scss'],
standalone: false,
providers: [CdkColumnDef],
})
export class DomainListComponent {
public static PATH = 'domain-list';

View File

@@ -1,14 +1,15 @@
<p class="console-app__header">
<mat-toolbar>
@if (breakpointObserver.isMobileView()) {
<button
mat-icon-button
aria-label="Open navigation menu"
(click)="toggleNavPane()"
*ngIf="breakpointObserver.isMobileView()"
class="console-app__menu-btn"
>
<mat-icon>menu</mat-icon>
</button>
}
<a
[routerLink]="'/home'"
routerLinkActive="active"
@@ -65,7 +66,9 @@
</svg>
</a>
<span class="spacer"></span>
<app-registrar-selector *ngIf="!breakpointObserver.isMobileView()" />
@if (!breakpointObserver.isMobileView()) {
<app-registrar-selector />
}
<button
class="console-app__header-user-icon"
mat-mini-fab
@@ -79,5 +82,7 @@
<button mat-menu-item (click)="logOut()">Log out</button>
</mat-menu>
</mat-toolbar>
<app-registrar-selector *ngIf="breakpointObserver.isMobileView()" />
@if (breakpointObserver.isMobileView()) {
<app-registrar-selector />
}
</p>

View File

@@ -9,41 +9,36 @@
<mat-card>
<mat-card-content>
<mat-list role="list">
<ng-container *ngFor="let item of historyRecords; let last = last">
<mat-list-item class="history-list__item">
<mat-icon
[ngClass]="getIconClass(item.type)"
class="history-list__icon"
>
{{ getIconForType(item.type) }}
</mat-icon>
<div class="history-list__content">
<div class="history-list__description">
<span class="history-list__description--main">{{
item.type
}}</span>
<div>
<mat-chip
*ngIf="parseDescription(item.description).detail"
class="history-list__chip"
>
{{ parseDescription(item.description).detail }}
</mat-chip>
</div>
</div>
<div class="history-list__user">
<b>User - {{ item.actingUser.emailAddress }}</b>
@for (item of historyRecords; track item; let last = $last) {
<mat-list-item class="history-list__item">
<mat-icon
[ngClass]="getIconClass(item.type)"
class="history-list__icon"
>
{{ getIconForType(item.type) }}
</mat-icon>
<div class="history-list__content">
<div class="history-list__description">
<span class="history-list__description--main">{{ item.type }}</span>
<div>
@if (parseDescription(item.description).detail) {
<mat-chip class="history-list__chip">
{{ parseDescription(item.description).detail }}
</mat-chip>
}
</div>
</div>
<span class="history-list__timestamp">
{{ item.modificationTime | date : "MMM d, y, h:mm a" }}
</span>
</mat-list-item>
<mat-divider *ngIf="!last"></mat-divider>
</ng-container>
<div class="history-list__user">
<b>User - {{ item.actingUser.emailAddress }}</b>
</div>
</div>
<span class="history-list__timestamp">
{{ item.modificationTime | date : "MMM d, y, h:mm a" }}
</span>
</mat-list-item>
@if (!last) {
<mat-divider></mat-divider>
} }
</mat-list>
</mat-card-content>
</mat-card>

View File

@@ -17,6 +17,9 @@ import { ComponentFixture, TestBed } from '@angular/core/testing';
import { HomeComponent } from './home.component';
import { MaterialModule } from '../material.module';
import { AppModule } from '../app.module';
import { BackendService } from '../shared/services/backend.service';
import { provideHttpClient } from '@angular/common/http';
import { provideHttpClientTesting } from '@angular/common/http/testing';
describe('HomeComponent', () => {
let component: HomeComponent;
@@ -26,6 +29,11 @@ describe('HomeComponent', () => {
await TestBed.configureTestingModule({
imports: [MaterialModule, AppModule],
declarations: [HomeComponent],
providers: [
BackendService,
provideHttpClient(),
provideHttpClientTesting(),
],
}).compileComponents();
fixture = TestBed.createComponent(HomeComponent);

View File

@@ -12,9 +12,11 @@
[class.active]="router.url.includes(node.path)"
[elementId]="getElementId(node)"
>
<mat-icon class="console-app__nav-icon" *ngIf="node.iconName">
@if (node.iconName) {
<mat-icon class="console-app__nav-icon">
{{ node.iconName }}
</mat-icon>
}
{{ node.title }}
</mat-tree-node>
<mat-nested-tree-node
@@ -34,9 +36,11 @@
{{ treeControl.isExpanded(node) ? "expand_more" : "chevron_right" }}
</mat-icon>
</button>
<mat-icon class="console-app__nav-icon" *ngIf="node.iconName">
@if (node.iconName) {
<mat-icon class="console-app__nav-icon">
{{ node.iconName }}
</mat-icon>
}
{{ node.title }}
</div>
<div

View File

@@ -1,25 +1,28 @@
<h1 class="mat-headline-4">OT&E Status Check</h1>
@if(registrarId() === null) {
<h1>Missing registrarId param</h1>
} @else if(isOte()) {
<h1 *ngIf="oteStatusResponse().length">
} @else if(isOte()) { @if (oteStatusResponse().length) {
<h1>
Status:
<span>{{ oteStatusUnfinished().length ? "Unfinished" : "Completed" }}</span>
</h1>
}
<div class="console-app__ote-status">
@if(oteStatusCompleted().length) {
<div class="console-app__ote-status_completed">
<h1>Completed</h1>
<div *ngFor="let entry of oteStatusCompleted()">
<mat-icon>check_box</mat-icon>{{ entry.description }}
</div>
@for (entry of oteStatusCompleted(); track entry) {
<div><mat-icon>check_box</mat-icon>{{ entry.description }}</div>
}
</div>
} @if(oteStatusUnfinished().length) {
<div class="console-app__ote-status_unfinished">
<h1>Unfinished</h1>
<div *ngFor="let entry of oteStatusUnfinished()">
@for (entry of oteStatusUnfinished(); track entry) {
<div>
<mat-icon>check_box_outline_blank</mat-icon>{{ entry.description }}
</div>
}
</div>
}
</div>

View File

@@ -18,7 +18,7 @@ import { MatSnackBar } from '@angular/material/snack-bar';
import { RegistrarService } from '../registrar/registrar.service';
import { MaterialModule } from '../material.module';
import { SnackBarModule } from '../snackbar.module';
import { CommonModule } from '@angular/common';
import { ActivatedRoute, ParamMap } from '@angular/router';
import { take } from 'rxjs';
@@ -31,7 +31,7 @@ export interface OteStatusResponse {
@Component({
selector: 'app-ote-status',
imports: [MaterialModule, SnackBarModule, CommonModule],
imports: [MaterialModule, SnackBarModule],
templateUrl: './oteStatus.component.html',
styleUrls: ['./oteStatus.component.scss'],
})

View File

@@ -11,9 +11,8 @@
<mat-icon>arrow_back</mat-icon>
</button>
<div class="spacer"></div>
@if(!inEdit && !registrarNotFound) {
@if(!inEdit && !registrarNotFound) { @if (oteButtonVisible) {
<button
*ngIf="oteButtonVisible"
mat-stroked-button
(click)="checkOteStatus()"
aria-label="Check OT&E account status"
@@ -21,6 +20,7 @@
>
Check OT&E Status
</button>
}
<button
mat-flat-button
color="primary"
@@ -39,10 +39,11 @@
<h1>Registrar not found</h1>
} @else {
<h1>{{ registrarInEdit.registrarId }}</h1>
<h2 *ngIf="registrarInEdit.registrarName !== registrarInEdit.registrarId">
@if (registrarInEdit.registrarName !== registrarInEdit.registrarId) {
<h2>
{{ registrarInEdit.registrarName }}
</h2>
@if(inEdit) {
} @if(inEdit) {
<form (ngSubmit)="saveAndClose()">
<div>
<mat-form-field appearance="outline">
@@ -60,15 +61,14 @@
<mat-form-field appearance="outline">
<mat-label>Onboarded TLDs: </mat-label>
<mat-chip-grid #chipGrid aria-label="Enter TLD">
<mat-chip-row
*ngFor="let tld of registrarInEdit.allowedTlds"
(removed)="removeTLD(tld)"
>
@for (tld of registrarInEdit.allowedTlds; track tld) {
<mat-chip-row (removed)="removeTLD(tld)">
{{ tld }}
<button matChipRemove aria-label="'remove ' + tld">
<mat-icon>cancel</mat-icon>
</button>
</mat-chip-row>
}
</mat-chip-grid>
<input
placeholder="New tld..."

View File

@@ -5,15 +5,16 @@
<div class="console-app__registrars-header">
<h1 class="mat-headline-4" forceFocus>Registrars</h1>
<div class="spacer"></div>
@if (oteButtonVisible) {
<button
mat-stroked-button
*ngIf="oteButtonVisible"
(click)="createOteAccount()"
aria-label="Generate OT&E accounts"
[elementId]="getElementIdForOteBlock()"
>
Create OT&E accounts
</button>
}
<button
class="console-app__registrars-new"
mat-flat-button
@@ -43,10 +44,8 @@
class="console-app__registrars-table"
matSort
>
<ng-container
*ngFor="let column of columns"
[matColumnDef]="column.columnDef"
>
@for (column of columns; track column) {
<ng-container [matColumnDef]="column.columnDef">
<mat-header-cell *matHeaderCellDef>
{{ column.header }}
</mat-header-cell>
@@ -55,6 +54,7 @@
[innerHTML]="column.cell(row)"
></mat-cell>
</ng-container>
}
<mat-header-row *matHeaderRowDef="displayedColumns"></mat-header-row>
<mat-row
*matRowDef="let row; columns: displayedColumns"

View File

@@ -22,13 +22,12 @@
</div>
} @else {
<mat-table [dataSource]="dataSource" class="mat-elevation-z0">
<ng-container
*ngFor="let column of columns"
[matColumnDef]="column.columnDef"
>
@for (column of columns; track column) {
<ng-container [matColumnDef]="column.columnDef">
<mat-header-cell *matHeaderCellDef> {{ column.header }} </mat-header-cell>
<mat-cell *matCellDef="let row" [innerHTML]="column.cell(row)"></mat-cell>
</ng-container>
}
<mat-header-row *matHeaderRowDef="displayedColumns"></mat-header-row>
<mat-row
*matRowDef="let row; columns: displayedColumns"

View File

@@ -1,9 +1,5 @@
<div
class="console-app__contact"
*ngIf="contactService.contactInEdit"
cdkTrapFocus
[cdkTrapFocusAutoCapture]="true"
>
@if (contactService.contactInEdit) {
<div class="console-app__contact" cdkTrapFocus [cdkTrapFocusAutoCapture]="true">
<div class="console-app__contact-controls">
<button
mat-icon-button
@@ -32,7 +28,6 @@
</button>
}
</div>
@if(isEditing || contactService.isContactNewView) {
<h1>Contact Details</h1>
<form (ngSubmit)="save($event)">
@@ -46,7 +41,6 @@
[ngModelOptions]="{ standalone: true }"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Primary account email: </mat-label>
<input
@@ -64,7 +58,6 @@
"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Phone: </mat-label>
<input
@@ -74,7 +67,6 @@
placeholder="+0.0000000000"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Fax: </mat-label>
<input
@@ -84,7 +76,6 @@
/>
</mat-form-field>
</section>
<section>
<h1>Contact Type</h1>
<p class="console-app__contact-required">
@@ -92,21 +83,18 @@
(primary contact can't be updated)
</p>
<div class="">
<ng-container
*ngFor="let contactType of contactTypeToTextMap | keyvalue"
@for (contactType of contactTypeToTextMap | keyvalue; track contactType)
{ @if (shouldDisplayCheckbox(contactType.key)) {
<mat-checkbox
[checked]="checkboxIsChecked(contactType.key)"
(change)="checkboxOnChange($event, contactType.key)"
[disabled]="checkboxIsDisabled(contactType.key)"
>
<mat-checkbox
*ngIf="shouldDisplayCheckbox(contactType.key)"
[checked]="checkboxIsChecked(contactType.key)"
(change)="checkboxOnChange($event, contactType.key)"
[disabled]="checkboxIsDisabled(contactType.key)"
>
{{ contactType.value }}
</mat-checkbox>
</ng-container>
{{ contactType.value }}
</mat-checkbox>
} }
</div>
</section>
<section>
<h1>RDAP Preferences</h1>
<div>
@@ -116,7 +104,6 @@
>Show in Registrar RDAP record as admin contact</mat-checkbox
>
</div>
<div>
<mat-checkbox
[(ngModel)]="contactService.contactInEdit.visibleInRdapAsTech"
@@ -124,7 +111,6 @@
>Show in Registrar RDAP record as technical contact</mat-checkbox
>
</div>
<div>
<mat-checkbox
[(ngModel)]="contactService.contactInEdit.visibleInDomainRdapAsAbuse"
@@ -198,15 +184,13 @@
</mat-list-item>
} @if(contactService.contactInEdit.visibleInRdapAsTech) {
<mat-divider></mat-divider>
<mat-list-item
role="listitem"
*ngIf="contactService.contactInEdit.visibleInRdapAsTech"
>
@if (contactService.contactInEdit.visibleInRdapAsTech) {
<mat-list-item role="listitem">
<span class="console-app__list-value"
>Show in Registrar RDAP record as technical contact</span
>
</mat-list-item>
} @if(contactService.contactInEdit.visibleInDomainRdapAsAbuse) {
} } @if(contactService.contactInEdit.visibleInDomainRdapAsAbuse) {
<mat-divider></mat-divider>
<mat-list-item role="listitem">
<span class="console-app__list-value"
@@ -220,3 +204,4 @@
</mat-card>
}
</div>
}

View File

@@ -1,6 +1,6 @@
@if (registrarInEdit) {
<div
class="console-app__rdap-edit"
*ngIf="registrarInEdit"
cdkTrapFocus
[cdkTrapFocusAutoCapture]="true"
>
@@ -12,7 +12,6 @@
>
<mat-icon>arrow_back</mat-icon>
</button>
<div class="console-app__rdap-edit-controls">
<span>
General registrar information for your RDAP record. This information is
@@ -20,10 +19,8 @@
</span>
<div class="spacer"></div>
</div>
<div class="console-app__rdap-edit">
<h1>Personal info</h1>
<form (ngSubmit)="save($event)">
<mat-form-field appearance="outline">
<mat-label>Email: </mat-label>
@@ -34,7 +31,6 @@
[ngModelOptions]="{ standalone: true }"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Phone: </mat-label>
<input
@@ -44,7 +40,6 @@
[ngModelOptions]="{ standalone: true }"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Fax: </mat-label>
<input
@@ -54,7 +49,6 @@
[ngModelOptions]="{ standalone: true }"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Street Address (line 1): </mat-label>
<input
@@ -64,7 +58,6 @@
[(ngModel)]="registrarInEdit.localizedAddress.street![0]"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Street Address (line 2): </mat-label>
<input
@@ -74,7 +67,6 @@
[(ngModel)]="registrarInEdit.localizedAddress.street![1]"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>City: </mat-label>
<input
@@ -84,7 +76,6 @@
[(ngModel)]="(registrarInEdit.localizedAddress || {}).city"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>State or Province: </mat-label>
<input
@@ -94,7 +85,6 @@
[(ngModel)]="(registrarInEdit.localizedAddress || {}).state"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Country: </mat-label>
<input
@@ -104,7 +94,6 @@
[(ngModel)]="(registrarInEdit.localizedAddress || {}).countryCode"
/>
</mat-form-field>
<mat-form-field appearance="outline">
<mat-label>Postal code: </mat-label>
<input
@@ -114,7 +103,6 @@
[(ngModel)]="(registrarInEdit.localizedAddress || {}).zip"
/>
</mat-form-field>
<button
mat-flat-button
color="primary"
@@ -126,3 +114,4 @@
</form>
</div>
</div>
}

View File

@@ -20,7 +20,7 @@ import { RegistrarService } from 'src/app/registrar/registrar.service';
import { SecurityService } from './security.service';
import { UserDataService } from 'src/app/shared/services/userData.service';
import { MatDialog, MatDialogRef } from '@angular/material/dialog';
import { CommonModule } from '@angular/common';
import { MaterialModule } from 'src/app/material.module';
import { filter, switchMap, take } from 'rxjs';
import { BackendService } from 'src/app/shared/services/backend.service';
@@ -41,7 +41,7 @@ import {
<button mat-button color="warn" (click)="onSave()">Confirm</button>
</mat-dialog-actions>
`,
imports: [CommonModule, MaterialModule],
imports: [MaterialModule],
})
export class ResetEppPasswordComponent {
constructor(public dialogRef: MatDialogRef<ResetEppPasswordComponent>) {}

View File

@@ -68,7 +68,7 @@
>
</mat-list-item>
<mat-divider></mat-divider>
@for (item of dataSource.ipAddressAllowList; track item.value) {
@for (item of dataSource.ipAddressAllowList; track $index){
<mat-list-item role="listitem">
<span class="console-app__list-value">{{ item.value }}</span>
</mat-list-item>

View File

@@ -12,7 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { ComponentFixture, TestBed, waitForAsync } from '@angular/core/testing';
import {
ComponentFixture,
fakeAsync,
TestBed,
tick,
waitForAsync,
} from '@angular/core/testing';
import { provideHttpClient } from '@angular/common/http';
import { provideHttpClientTesting } from '@angular/common/http/testing';
@@ -30,43 +36,32 @@ import { MOCK_REGISTRAR_SERVICE } from 'src/testdata/registrar/registrar.service
describe('SecurityComponent', () => {
let component: SecurityComponent;
let fixture: ComponentFixture<SecurityComponent>;
let fetchSecurityDetailsSpy: Function;
let saveSpy: Function;
let securityServiceStub: Partial<SecurityService>;
beforeEach(async () => {
const securityServiceSpy = jasmine.createSpyObj(SecurityService, [
'fetchSecurityDetails',
'saveChanges',
]);
fetchSecurityDetailsSpy =
securityServiceSpy.fetchSecurityDetails.and.returnValue(of());
saveSpy = securityServiceSpy.saveChanges.and.returnValue(of());
securityServiceStub = {
isEditingSecurity: false,
isEditingPassword: false,
saveChanges: jasmine.createSpy('saveChanges').and.returnValue(of({})),
};
await TestBed.configureTestingModule({
declarations: [SecurityEditComponent, SecurityComponent],
imports: [MaterialModule, BrowserAnimationsModule, FormsModule],
providers: [
BackendService,
SecurityService,
{ provide: SecurityService, useValue: securityServiceStub },
{ provide: RegistrarService, useValue: MOCK_REGISTRAR_SERVICE },
provideHttpClient(),
provideHttpClientTesting(),
],
})
.overrideComponent(SecurityComponent, {
set: {
providers: [
{ provide: SecurityService, useValue: securityServiceSpy },
],
},
})
.compileComponents();
}).compileComponents();
saveSpy = securityServiceStub.saveChanges as jasmine.Spy;
fixture = TestBed.createComponent(SecurityComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
@@ -93,17 +88,36 @@ describe('SecurityComponent', () => {
});
}));
it('should remove ip', waitForAsync(() => {
component.dataSource.ipAddressAllowList =
component.dataSource.ipAddressAllowList?.splice(1);
fixture.whenStable().then(() => {
fixture.detectChanges();
let listElems: Array<HTMLElement> = Array.from(
fixture.nativeElement.querySelectorAll('span.console-app__list-value')
);
expect(listElems.map((e) => e.textContent)).toContain(
'No IP addresses on file.'
);
it('should remove ip', fakeAsync(() => {
fixture.detectChanges();
tick();
const editBtn = fixture.nativeElement.querySelector(
'button[aria-label="Edit security settings"]'
);
editBtn.click();
tick();
fixture.detectChanges();
const removeIpBtn = fixture.nativeElement.querySelector(
'.console-app__removeIp'
);
removeIpBtn.click();
tick();
fixture.detectChanges();
const saveBtn = fixture.nativeElement.querySelector(
'.settings-security__edit-save'
);
saveBtn.click();
tick();
fixture.detectChanges();
expect(saveSpy).toHaveBeenCalledWith({
ipAddressAllowList: [],
});
}));
@@ -119,21 +133,34 @@ describe('SecurityComponent', () => {
expect(component.securityService.isEditingPassword).toBeTrue();
});
it('should call save', waitForAsync(async () => {
component.editSecurity();
await fixture.whenStable();
it('should call save', fakeAsync(() => {
fixture.detectChanges();
tick();
const editBtn = fixture.nativeElement.querySelector(
'button[aria-label="Edit security settings"]'
);
editBtn.click();
tick();
fixture.detectChanges();
const el = fixture.nativeElement.querySelector(
'.console-app__clientCertificateValue'
);
el.value = 'test';
el.dispatchEvent(new Event('input'));
tick();
fixture.detectChanges();
await fixture.whenStable();
fixture.nativeElement
.querySelector('.settings-security__edit-save')
.click();
expect(saveSpy).toHaveBeenCalledOnceWith({
const saveBtn = fixture.nativeElement.querySelector(
'.settings-security__edit-save'
);
saveBtn.click();
tick();
expect(saveSpy).toHaveBeenCalledWith({
ipAddressAllowList: [{ value: '123.123.123.123' }],
clientCertificate: 'test',
});

View File

@@ -23,9 +23,11 @@
<button
matSuffix
mat-icon-button
class="console-app__removeIp"
[attr.aria-label]="'Remove IP entry ' + ip.value"
(click)="removeIpEntry(ip)"
[disabled]="isUpdating"
type="button"
>
<mat-icon>close</mat-icon>
</button>

View File

@@ -14,9 +14,9 @@
required
autocomplete="current-password"
/>
<mat-error *ngIf="hasError('oldPassword') as errorText">{{
errorText
}}</mat-error>
@if (hasError('oldPassword'); as errorText) {
<mat-error>{{ errorText }}</mat-error>
}
</mat-form-field>
</div>
}
@@ -30,9 +30,9 @@
required
autocomplete="new-password"
/>
<mat-error *ngIf="hasError('newPassword') as errorText">{{
errorText
}}</mat-error>
@if (hasError('newPassword'); as errorText) {
<mat-error>{{ errorText }}</mat-error>
}
</mat-form-field>
</div>
<div class="console-app__password-input-form-field">
@@ -45,9 +45,9 @@
required
autocomplete="new-password"
/>
<mat-error *ngIf="hasError('newPasswordRepeat') as errorText">{{
errorText
}}</mat-error>
@if (hasError('newPasswordRepeat'); as errorText) {
<mat-error>{{ errorText }}</mat-error>
}
</mat-form-field>
</div>
<button

View File

@@ -50,15 +50,17 @@
<mat-icon>delete</mat-icon>
</button>
</div>
<div *ngIf="isNewUser" class="console-app__user-details-save-password">
@if (isNewUser) {
<div class="console-app__user-details-save-password">
<mat-icon>priority_high</mat-icon>
Please save the password. For your security, we do not store passwords in a
recoverable format.
</div>
<p *ngIf="isLoading">
} @if (isLoading) {
<p>
<mat-progress-bar mode="query"></mat-progress-bar>
</p>
}
<mat-card appearance="outlined">
<mat-card-content>

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { CommonModule } from '@angular/common';
import { Component, computed } from '@angular/core';
import { MatSnackBar } from '@angular/material/snack-bar';
import { SelectedRegistrarModule } from '../app.module';
@@ -31,7 +30,6 @@ import { UserEditFormComponent } from './userEditForm.component';
FormsModule,
MaterialModule,
SnackBarModule,
CommonModule,
SelectedRegistrarModule,
UserEditFormComponent,
],

View File

@@ -1,6 +1,7 @@
<div class="console-app__user-edit">
<form (ngSubmit)="saveEdit($event)" #form>
<p *ngIf="isNew()">
@if (isNew()) {
<p>
<mat-form-field appearance="outline">
<mat-label
>User name prefix:
@@ -19,6 +20,7 @@
/>
</mat-form-field>
</p>
}
<p>
<mat-form-field appearance="outline">
<mat-label

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { CommonModule } from '@angular/common';
import {
Component,
ElementRef,
@@ -50,7 +49,7 @@ import { HttpErrorResponse } from '@angular/common/http';
<button mat-button color="warn" (click)="onSave()">Confirm</button>
</mat-dialog-actions>
`,
imports: [CommonModule, MaterialModule],
imports: [MaterialModule],
})
export class ResetRegistryLockPasswordComponent {
constructor(
@@ -72,7 +71,7 @@ export class ResetRegistryLockPasswordComponent {
selector: 'app-user-edit-form',
templateUrl: './userEditForm.component.html',
styleUrls: ['./userEditForm.component.scss'],
imports: [FormsModule, MaterialModule, CommonModule],
imports: [FormsModule, MaterialModule],
providers: [],
})
export class UserEditFormComponent {

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { CommonModule } from '@angular/common';
import { HttpErrorResponse } from '@angular/common/http';
import { Component, effect } from '@angular/core';
import { MatSnackBar } from '@angular/material/snack-bar';
@@ -35,7 +34,6 @@ import { UserEditFormComponent } from './userEditForm.component';
FormsModule,
MaterialModule,
SnackBarModule,
CommonModule,
SelectedRegistrarModule,
UsersListComponent,
UserEditFormComponent,

View File

@@ -5,15 +5,14 @@
class="console-app__users-table"
matSort
>
<ng-container
*ngFor="let column of columns"
[matColumnDef]="column.columnDef"
>
@for (column of columns; track column) {
<ng-container [matColumnDef]="column.columnDef">
<mat-header-cell *matHeaderCellDef>
{{ column.header }}
</mat-header-cell>
<mat-cell *matCellDef="let row" [innerHTML]="column.cell(row)"></mat-cell>
</ng-container>
}
<mat-header-row *matHeaderRowDef="displayedColumns"></mat-header-row>
<mat-row
*matRowDef="let row; columns: displayedColumns"

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { CommonModule } from '@angular/common';
import {
Component,
effect,
@@ -43,7 +42,7 @@ export const columns = [
selector: 'app-users-list',
templateUrl: './usersList.component.html',
styleUrls: ['./usersList.component.scss'],
imports: [MaterialModule, CommonModule],
imports: [MaterialModule],
providers: [],
})
export class UsersListComponent {

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { enableProdMode } from '@angular/core';
import { enableProdMode, provideZoneChangeDetection } from '@angular/core';
import { platformBrowserDynamic } from '@angular/platform-browser-dynamic';
import { AppModule } from './app/app.module';
@@ -23,5 +23,7 @@ if (environment.production || environment.sandbox) {
}
platformBrowserDynamic()
.bootstrapModule(AppModule)
.bootstrapModule(AppModule, {
applicationProviders: [provideZoneChangeDetection()],
})
.catch((err) => console.error(err));

View File

@@ -14,14 +14,10 @@
"sourceMap": true,
"declaration": false,
"experimentalDecorators": true,
"moduleResolution": "node",
"moduleResolution": "bundler",
"importHelpers": true,
"target": "ES2022",
"module": "es2020",
"lib": [
"es2020",
"dom"
],
"useDefineForClassFields": false
},
"angularCompilerOptions": {

View File

@@ -123,8 +123,7 @@ public class EppXmlTransformer {
public static byte[] marshal(EppOutput root, ValidationMode validation) throws XmlException {
byte[] bytes = marshal(OUTPUT_TRANSFORMER, root, validation);
if (!RegistryEnvironment.PRODUCTION.equals(RegistryEnvironment.get())
&& hasFeeExtension(root)) {
if (hasFeeExtension(root)) {
return FeeExtensionXmlTagNormalizer.normalize(new String(bytes, UTF_8)).getBytes(UTF_8);
}
return bytes;

View File

@@ -235,10 +235,7 @@ public class RdePipelineTest {
persistHostHistory(host1);
Domain helloDomain =
persistEppResource(
newDomain("hello.soy")
.asBuilder()
.addNameserver(host1.createVKey())
.build());
newDomain("hello.soy").asBuilder().addNameserver(host1.createVKey()).build());
persistDomainHistory(helloDomain);
persistHostHistory(persistActiveHost("not-used-subordinate.hello.soy"));
Host host2 = persistActiveHost("ns1.hello.soy");

Binary file not shown.

Before

Width:  |  Height:  |  Size: 35 KiB

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 52 KiB

After

Width:  |  Height:  |  Size: 52 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 38 KiB

After

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 93 KiB

After

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 35 KiB

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 95 KiB

After

Width:  |  Height:  |  Size: 96 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 38 KiB

After

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 48 KiB

After

Width:  |  Height:  |  Size: 48 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 72 KiB

After

Width:  |  Height:  |  Size: 72 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 45 KiB

After

Width:  |  Height:  |  Size: 45 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 45 KiB

After

Width:  |  Height:  |  Size: 45 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 74 KiB

After

Width:  |  Height:  |  Size: 74 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 38 KiB

After

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 57 KiB

After

Width:  |  Height:  |  Size: 56 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 62 KiB

After

Width:  |  Height:  |  Size: 62 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 76 KiB

After

Width:  |  Height:  |  Size: 76 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 76 KiB

After

Width:  |  Height:  |  Size: 76 KiB

View File

@@ -1,13 +1,12 @@
# Admin tool
Nomulus includes a command-line registry administration tool that is invoked
using the `nomulus` command. It has the ability to view and change a large
number of things in a live Nomulus environment, including creating registrars,
updating premium and reserved lists, running an EPP command from a given XML
file, and performing various backend tasks like re-running RDE if the most
Nomulus includes a command-line registry administration tool. It has the ability
to view and change a large number of things in a live Nomulus environment,
including creating registrars, running arbitrary EPP commands from given XML
files, and performing various backend tasks like re-running RDE if the most
recent export failed. Its code lives inside the tools package
(`java/google/registry/tools`), and is compiled by building the `nomulus` target
in the Bazel BUILD file in that package.
(`core/src/main/java/google/registry/tools`), and is compiled by building the
`nomulus` Gradle target in the `core` project, e.g. `./gradlew core:nomulus`.
The tool connects to the Google Cloud Platform project (identified by project
ID) that was configured in your implementation of `RegistryConfig` when the tool
@@ -21,18 +20,25 @@ ID is also "acme-registry", and the project ID for the sandbox environment is
## Build the tool
To build the `nomulus` tool, execute the following `bazel build` command inside
any directory of the codebase. You must rebuild the tool any time that you edit
configuration or make database schema changes.
To build the `nomulus` tool's jarfile, execute the following Gradle command
inside the project's home directory: `./gradlew core:nomulus`. You must rebuild
the tool any time that you edit configuration or make database schema changes.
Note that proper project configuration is necessary for building the tool --
this includes specialized configuration such as GCP project names.
It's recommended that you alias the compiled jarfile located at
`core/build/libs/nomulus.jar` (or add it to your shell path) so that you can run
it easily, e.g.
```shell
$ bazel build //java/google/registry/tools:nomulus
$ alias nomulus="java -jar core/build/libs/nomulus.jar"
```
It's recommended that you alias the compiled binary located at
`bazel-genfiles/java/google/registry/nomulus` (or add it to your shell path) so
that you can run it easily. The rest of this guide assumes that it has been
aliased to `nomulus`.
The rest of this guide assumes that it has been aliased to `nomulus`.
Note: for Google Registry employees, the nomulus tool is built as part of the
weekly deployment process and the nomulus jarfile is located at
`/google/data/ro/teams/domain-registry/tools/live/nomulus.jar`
## Running the tool
@@ -56,33 +62,27 @@ metadata contained within the code to yield documentation.
## Local and server-side commands
There are two broad ways that commands are implemented: some that send requests
to `ToolsServlet` to execute the action on the server (these commands implement
`ServerSideCommand`), and others that execute the command locally using the
[Remote API](https://cloud.google.com/appengine/docs/java/tools/remoteapi)
(these commands implement `RemoteApiCommand`). Server-side commands take more
work to implement because they require both a client and a server-side
component.
However, they are fully capable of doing anything that is possible with App
Engine, including running a large MapReduce, because they execute on the tools
service in the App Engine cloud.
There are two broad ways that commands are implemented: some send requests to
the backend server to execute the action on the server (these commands implement
`CommandWithConnection`), while others execute the command locally using
access to the database. Commands that send requests to the backend server are
more work to implement because they require both a client-side and a server-side
component, but they are more powerful -- they can even run Flow pipelines or
other long-running intensive jobs.
Local commands, by contrast, are easier to implement, because there is only a
local component to write, but they aren't as powerful. A general rule of thumb
for making this determination is to use a local command if possible, or a
server-side command otherwise.
Local commands are easier to implement (because there is only a local component
to write) but they aren't as powerful. As a rule of thumb, use a local command
if possible.
## Common tool patterns
All tools ultimately implement the `Command` interface located in the `tools`
package. If you use an integrated development environment (IDE) such as IntelliJ
to view the type hierarchy of that interface, you'll see all of the commands
that exist, as well as how a lot of them are grouped using sub-interfaces or
abstract classes that provide additional functionality. The most common patterns
that are used by a large number of other tools are:
to view the type hierarchy of that interface, you'll see all the commands that
exist, as well as how a lot of them are grouped using sub-interfaces or abstract
classes that provide additional functionality. The most common patterns that are
used by a large number of other tools are:
* **`BigqueryCommand`** -- Provides a connection to BigQuery for tools that
need it.
* **`ConfirmingCommand`** -- Provides the methods `prompt()` and `execute()`
to override. `prompt()` outputs a message (usually what the command is going
to do) and prompts the user to confirm execution of the command, and then
@@ -90,10 +90,9 @@ that are used by a large number of other tools are:
* **`EppToolCommand`** -- Commands that work by executing EPP commands against
the server, usually by filling in a template with parameters that were
passed on the command-line.
* **`MutatingEppToolCommand`** -- A sub-class of `EppToolCommand` that
provides a `--dry_run` flag, that, if passed, will display the output from
the server of what the command would've done without actually committing
those changes.
* **`MutatingEppToolCommand`** -- A subclass of `EppToolCommand` that provides
a `--dry_run` flag which, if passed, will display the output from the server
of what the command would've done without actually committing those changes.
* **`GetEppResourceCommand`** -- Gets individual EPP resources from the server
and outputs them.
* **`ListObjectsCommand`** -- Lists all objects of a specific type from the

View File

@@ -8,61 +8,41 @@ requests will be authorized to invoke the action.
## Authentication and authorization properties
The `auth` attribute is an enumeration. Each value of the enumeration
corresponds to a triplet of properties:
corresponds to a pair of properties:
* the *authentication methods* allowed by the action
* the *minimum authentication level* which is authorized to run the action
* the *user policy* for the action
* the *minimum authentication level* which is authorized to run the action
* the *user policy* for the action
### Authentication methods
### Authentication levels
Authentication methods are ways whereby the request can authenticate itself to
the system. In the code, an *authentication mechanism* is a class which handles
a particular authentication method. There are currently three methods:
There exist three authentication levels:
* `INTERNAL`: used by requests generated from App Engine task queues; these
requests do not have a user, because they are system-generated, so
authentication consists solely of verifying that the request did indeed
come from a task queue
* `NONE`: no authentication was found
* `APP`: the request was authenticated, but no user was present
* `USER`: the request was authenticated with a specific user
* `API`: authentication using an API; the Nomulus release ships with one API
authentication mechanism, OAuth 2, but you can write additional custom
mechanisms to handle other protocols if needed
`NONE` and `USER` are fairly straightforward results (either no authentication
was present, or a user was present), but `APP` is a bit of a special case. It
exists for requests coming from service accounts, Cloud Scheduler, or the
proxy -- requests which are authenticated but don't necessarily come from any
one particular "user" per se. That being said, authorized users *can* manually
run these tasks; it's just that service accounts can too.
* `LEGACY`: authentication using the standard App Engine `UserService` API,
which authenticates based on cookies and XSRF tokens
The details of the associated authentication mechanism classes are given later.
### Authentication levels
Each authentication method listed above can authenticate at one of three levels:
* `NONE`: no authentication was found
* `APP`: the request was authenticated, but no user was present
* `USER`: the request was authenticated with a specific user
For instance, `INTERNAL` authentication never returns an authentication level of
`USER`, because internal requests generated from App Engine task queues do not
execute as a particular end user account. `LEGACY` authentication, on the other
hand, never returns an authentication level of `APP`, because authentication is
predicated on identifying the user, so the only possible answers are `NONE` and
`USER`.
Each action has a minimum request authentication level. Some actions are
completely open to the public, and have a minimum level of `NONE`. Some require
authentication but not a user, and have a minimum level of `APP`. And some
cannot function properly without knowing the exact user, and have a minimum
level of `USER`.
Each action has a minimum request authentication level. Some actions (e.g. RDAP)
are completely open to the public, and have a minimum level of `NONE`. Some
require authentication but not necessarily a user, and have a minimum level of
`APP`. And some cannot function properly without knowing the exact user, and
have a minimum level of `USER`.
### User policy
The user policy indicates what kind of user is authorized to execute the action.
There are three possible values:
There are two possible values:
* `IGNORED`: the user information is ignored
* `PUBLIC`: an authenticated user is required, but any user will do
* `ADMIN`: there must be an authenticated user with admin privileges
* `PUBLIC`: an authenticated user is required, but any user will do
(authorization is done at a later state)
* `ADMIN`: there must be an authenticated user with admin privileges (this
includes service accounts)
Note that the user policy applies only to the automatic checking done by the
framework before invoking the action. The action itself may do more checking.
@@ -73,10 +53,6 @@ whether a user was found. If not, it issues a redirect to the login page.
Likewise, other pages of the registrar console have a user policy of `PUBLIC`,
meaning that any logged-in user can access the page. However, the code then
looks up the user to make sure he or she is associated with a registrar.
Admins can be granted permission to the registrar console by configuring a
special registrar for internal admin use, using the `registryAdminClientId`
setting. See the [global configuration
guide](./configuration.md#global-configuration) for more details.
Also note that the user policy only applies when there is actually a user. Some
actions can be executed either by an admin user or by an internal request coming
@@ -87,64 +63,41 @@ require that there be a user, set the minimum authentication level to `USER`.
### Allowed authentication and authorization values
Not all triplets of the authentication method, minimum level and user policy
make sense. A master enumeration lists all the valid triplets. They are:
There are three pairs of authentication level + user policy that are used in
Nomulus (or even make sense). These are:
* `AUTH_PUBLIC_ANONYMOUS`: Allow all access, and don't attempt to authenticate.
The only authentication method is `INTERNAL`, with a minimum level of
`NONE`. Internal requests will be flagged as such, but everything else
passes the authorization check with a value of `NOT_AUTHENTICATED`.
* `AUTH_PUBLIC`: Allow all access, but attempt to authenticate the user. All
three authentication methods are specified, with a minimum level of `NONE`
and a user policy of `PUBLIC`. If the user can be authenticated by any
means, the identity is passed to the request. But if not, the request still
passes the authorization check, with a value of `NOT_AUTHENTICATED`.
* `AUTH_PUBLIC_LOGGED_IN`: Allow access only by authenticated users. The
`API` and `LEGACY` authentication methods are supported, but not `INTERNAL`,
because that does not identify a user. The minimum level is `USER`, with a
user policy of `PUBLIC`. Only requests with a user authenticated via either
the legacy, cookie-based method or an API method (e.g. OAuth 2) are
authorized to run the action.
* `AUTH_INTERNAL_OR_ADMIN`: Allow access only by admin users or internal
requests. This is appropriate for actions that should only be accessed by
someone trusted (as opposed to anyone with a Google login). This currently
allows only the `INTERNAL` and `API` methods, meaning that an admin user
cannot authenticate themselves via the legacy authentication mechanism,
which is used only for the registrar console. The minimum level is `APP`,
because we don't require a user for internal requests, but the user policy
is `ADMIN`, meaning that if there *is* a user, it needs to be an admin.
* `AUTH_PUBLIC_OR_INTERNAL`: Allows anyone access, as long as they use OAuth to
authenticate. Also allows access from App Engine task-queue. Note that OAuth
client ID still needs to be allow-listed in the config file for OAuth-based
authentication to succeed. This is mainly used by the proxy.
* `AUTH_PUBLIC`: Allow all access and don't attempt to authenticate. This is
used for completely public endpoints such as RDAP.
* `AUTH_PUBLIC_LOGGED_IN`: Allow access only by users authenticated with some
type of OAuth token. This allows all users (`UserPolicy.PUBLIC`) but
requires that a particular user exists and is logged in (`AuthLevel.USER`).
This is used primarily for the registrar console.
* `AUTH_ADMIN`: Allow access only by admin users or internal requests
(including Cloud Scheduler tasks). This is appropriate for actions that
should only be accessed by someone trusted (as opposed to anyone with a
Google login). This permits app-internal authentication (`AuthLevel.APP`)
but if a user is present, it must be an admin (`UserPolicy.ADMIN`). This is
used by many automated requests, as well as the proxy.
### Action setting golden files
To make sure that the authentication and authorization settings are correct for
all actions, a unit test uses reflection to compare all defined actions for a
specific service to a golden file containing the correct settings. These files
are:
To make sure that the authentication and authorization settings are correct and
expected for all actions, a unit test uses reflection to compare all defined
actions for a specific service to a
[golden file](https://github.com/google/nomulus/blob/master/core/src/test/resources/google/registry/module/routing.txt)
containing the correct settings.
* `frontend_routing.txt` for the default (frontend) service
* `backend_routing.txt` for the backend service
* `tools_routing.txt` for the tools service
Each line in the file lists a path, the class that handles that path, the
allowable HTTP methods (meaning GET and POST, as opposed to the authentication
methods described above), the value of the `automaticallyPrintOk` attribute (not
relevant for purposes of this document), and the two authentication and
authorization settings described above. Whenever actions are added, or their
attributes are modified, the golden file needs to be updated.
Each of these files consists of lines listing a path, the class that handles
that path, the allowable HTTP methods (meaning GET and POST, as opposed to the
authentication methods described above), the value of the `automaticallyPrintOk`
attribute (not relevant for purposes of this document), and the three
authentication and authorization settings described above. Whenever actions are
added, or their attributes are modified, the golden files need to be updated.
The golden files also serve as a convenient place to check out how things are
set up. For instance, the tools actions are, for the most part, accessible to
admins and internal requests only. The backend actions are mostly accessible
only to internal requests. And the frontend actions are a grab-bag; some are
open to the public, some to any user, some only to admins, etc.
The golden file also serves as a convenient place to check out how things are
set up. For instance, the backend actions are accessible to admins and internal
requests only, the pubapi requests are open to the public, and console requests
require an authenticated user.
### Example
@@ -156,11 +109,12 @@ body rather than the URL itself (which could be logged). Therefore, the class
definition looks like:
```java
@Action(
path = "/_dr/epp",
method = Method.POST,
auth = Auth.AUTH_INTERNAL_OR_ADMIN
)
service = Action.Service.FRONTEND,
path = "/_dr/epp",
method = Method.POST,
auth = Auth.AUTH_ADMIN)
public class EppTlsAction implements Runnable {
...
```
@@ -169,8 +123,8 @@ and the corresponding line in frontend_routing.txt (including the header line)
is:
```shell
PATH CLASS METHODS OK AUTH_METHODS MIN USER_POLICY
/_dr/epp EppTlsAction POST n INTERNAL,API APP ADMIN
SERVICE PATH CLASS METHODS OK MIN USER_POLICY
FRONTEND /_dr/epp EppTlsAction POST n APP ADMIN
```
## Implementation
@@ -178,16 +132,12 @@ PATH CLASS METHODS OK AUTH_METHODS MIN USER_POLICY
The code implementing the authentication and authorization framework is
contained in the `google.registry.request.auth` package. The main method is
`authorize()`, in `RequestAuthenticator`. This method takes the auth settings
and an HTTP request, and tries to authenticate and authorize the request using
any of the specified methods, returning the result of its attempts. Note that
failed authorization (in which case `authorize()` returns `Optional.absent()`)
is different from the case where nothing can be authenticated, but the action
does not require any; in that case, `authorize()` succeeds, returning the
special result AuthResult.NOT_AUTHENTICATED.
There are separate classes (described below) for the mechanism which handles
each authentication method. The list of allowable API authentication mechanisms
(by default, just OAuth 2) is configured in `AuthModule`.
and an HTTP request, and tries to authenticate and authorize the request,
returning the result of its attempts. Note that failed authorization (in which
case `authorize()` returns `Optional.absent()`) is different from the case where
nothing can be authenticated, but the action does not require any; in that case,
`authorize()` succeeds, returning the special result
AuthResult.NOT_AUTHENTICATED.
The ultimate caller of `authorize()` is
`google.registry.request.RequestHandler`, which is responsible for routing
@@ -196,83 +146,51 @@ appropriate action, and making sure that the incoming HTTP method is appropriate
for the action, it calls `authorize()`, and rejects the request if authorization
fails.
### `LegacyAuthenticationMechanism`
### Authentication methods
Legacy authentication is straightforward, because the App Engine `UserService`
API does all the work. Because the protocol might be vulnerable to an XSRF
attack, the authentication mechanism issues and checks XSRF tokens as part
of the process if the HTTP method is not GET or HEAD.
Nomulus requests are authenticated via OIDC token authentication, though these
tokens can be created and validated in two ways. In each case, the
authentication mechanism converts an HTTP request to an authentication result,
which consists of an authentication level, a possible user object, and a
possible service account email.
### `OAuthAuthenticationMechanism`
#### `IapOidcAuthenticationMechanism`
OAuth 2 authentication is performed using the App Engine `OAuthService` API.
There are three Nomulus configuration values involved:
Most requests (e.g. the registrar console or Nomulus CLI requests) are routed
through GCP's
[Identity-Aware Proxy](https://docs.cloud.google.com/iap/docs/concepts-overview).
This forces the user to log in to some GAIA account (specifically, one that is
given access to the project). We attempt to validate a provided IAP OIDC token
with the IAP issuer URL (`https://cloud.google.com/iap`) and the proper IAP
audience (`/projects/{projectId}/global/backendServices/{serviceId}`), where
`projectId` refers to the GCP project, and `serviceId` refers to the service ID
retrievable from the
[IAP configuration page](https://pantheon.corp.google.com/security/iap).
Ideally, this service ID corresponds to the HTTPS load balancer that distributes
requests to the GKE pods.
* `availableOauthScopes` is the set of OAuth scopes passed to the service to
be checked for their presence.
Note: the local Nomulus CLI's
[LoginCommand](https://github.com/google/nomulus/blob/master/core/src/main/java/google/registry/tools/LoginCommand.java)
uses a special-case form of this where it saves long-lived IAP credentials
locally.
* `requiredOauthScopes` is the set of OAuth scopes which must be present. This
should be a subset of the available scopes. All scopes in this set must be
present for authentication to succeed.
#### `RegularOidcAuthenticationMechanism`
* `allowedOauthClientIds` is the set of allowable OAuth client IDs. Any client
ID in this set is sufficient for successful authentication.
Service account requests (e.g.
[Cloud Scheduler jobs](https://docs.cloud.google.com/scheduler/docs/schedule-run-cron-job))
or requests coming through the proxy use a non-IAP OIDC token provided by the
caller. These requests have a different issuer URL
(`https://accounts.google.com`) and use the fairly standard OAuth bearer token
architecture -- an `Authorization` HTTP header of the form "Bearer: XXXX".
The code looks for an `Authorization` HTTP header of the form "BEARER XXXX...",
containing the access token. If it finds one, it calls `OAuthService` to
validate the token, check that the scopes and client ID match, and retrieve the
flag indicating whether the user is an admin.
### Configuration
### `AppEngineInternalAuthenticationMechanism`
Detection of internal requests is a little hacky. App Engine uses a special HTTP
header, `X-AppEngine-QueueName`, to indicate the queue from which the request
originates. If this header is present, internal authentication succeeds. App
Engine normally strips this header from external requests, so only internal
requests will be authenticated.
App Engine has a special carve-out for admin users, who are allowed to specify
headers which do not get stripped. So an admin user can use a command-line
utility like `curl` to craft a request which appears to Nomulus to be an
internal request. This has proven to be useful, facilitating the testing of
actions which otherwise could only be run via a dummy cron job.
However, it only works if App Engine can authenticate the user as an admin via
the `UserService` API. OAuth won't work, because authentication is performed by
the Nomulus code, and the headers will already have been stripped by App Engine
before the request is executed. Only the legacy, cookie-based method will work.
Be aware that App Engine defines an "admin user" as anyone with access to the
App Engine project, even those with read-only access.
## Other topics
### OAuth 2 not supported for the registry console
Currently, OAuth 2 is only supported for requests which specify the
`Authorization` HTTP header. The OAuth code reads this header and passes it to
the Google OAuth server (no other authentication servers are currently
supported) to verify the user's identity. This works fine for the `nomulus`
command-line tool.
It doesn't work for browser-based interactions such as the registrar console.
For that, we will (we think) need to redirect the user to the authentication
server, and upon receiving the user back, fish out the code and convert it to a
token which we store in a cookie. None of this is particularly hard, but for the
moment it seems easier to stick with the legacy App Engine UserService API. Of
course, contributions from the open-source community are welcome. :)
### Authorization via `web.xml`
Before the modern authentication and authorization framework described in this
document was put in place, Nomulus used to be protected by directives in the
`web.xml` file which allowed only logged-in users to access most endpoints. This
had the advantage of being very easy to implement, but it came with some
drawbacks, the primary one being lack of support for OAuth 2. App Engine's
standard login detection works fine when using a browser, but does not handle
cases where the request is coming from a standalone program such as the
`nomulus` command-line tool. By moving away from the `web.xml` approach, we
gained more flexibility to support an array of authentication and authorization
schemes, including custom ones developed by the Nomulus community, at the
expense of having to perform the authentication and authorization ourselves in
the code.
The `auth` block of the configuration requires two fields:

*   `allowedServiceAccountEmails` is the list of service accounts that should
    be allowed to run tasks when internally authenticated. This will likely
    include whatever service account runs Nomulus in Google Kubernetes Engine,
    as well as the Cloud Scheduler service account.
*   `oauthClientId` is the OAuth client ID associated with IAP. This is
    retrievable from the
    [Clients page](https://pantheon.corp.google.com/auth/clients) of GCP after
    enabling the Identity-Aware Proxy. It should look something like
    `someNumbers-someNumbersAndLetters.apps.googleusercontent.com`.

View File

@@ -1,82 +1,28 @@
# Coding FAQ
This file is a motley assortment of informational emails generated in response
to questions from development partners.
## How do I mock Google Cloud Storage in tests?
AppEngine's GCS client automatically switches over to a local implementation,
[`GcsServiceFactory`](https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/master/java/src/main/java/com/google/appengine/tools/cloudstorage/GcsServiceFactory.java#L61),
for tests.
So rather than mocking GCS-related stuff at all, just use the fake local
implementation. This is what our tests should be doing; see
[`ExportCommitLogDiffActionTest`](https://github.com/google/nomulus/blob/master/javatests/google/registry/backup/ExportCommitLogDiffActionTest.java#L70).
Very rarely there have been cases where we've needed something beyond that (e.g.
to test against GCS being eventually consistent). In that case, rather than
mocking GcsUtils, you'd need to create a real instance of it but pass in a
mocked-out GcsService instance. Those are a bit of a pain to make since
GcsServiceImpl itself is also final (and not code we control), but you could
roll your own implementation by hand, or cheat and use a reflective proxy, as we
do in
[`GcsDiffFileListerTest`](https://github.com/google/domain-registry/blob/master/javatests/google/registry/backup/GcsDiffFileListerTest.java#L112).
## How do I test authentication on the SDK Development Server?
*Can someone explain how `GaeUserIdConverter.convertEmailAddressToGaeUserId()`
actually does the conversion? I see it's doing a save/load(/delete) of a
`GaeUserIdConverter`, which contains a `User`. Does Objectify do some magic on
load to look up the real GAE user ID from the email address? In trying to get
the registry to run in the SDK Development Server, I am seeing the wrong user ID
when adding a new RegistryContact using the command line tool.*
The [App Engine development
server](https://cloud.google.com/appengine/docs/python/tools/using-local-server)
is not particularly robust; it appears that it always returns userId
185804764220139124118 for any authenticated user, as per [this StackOverflow
thread](http://stackoverflow.com/questions/30524328/what-user-is-provided-by-app-engine-devserver).
For testing purposes, it might suffice to just create a RegistrarContact with
that userId by hand somehow, so that you can log in. In the longer term, if we
switch to Google Sign-In, this specific problem would go away, but based on the
above, it looks like OAuthService doesn't really work on the dev server either.
So for testing actual "real" authentication, you'd want to use an alpha instance
rather than the development server. We don't use the development server very
much internally for this reason.
## Do you support RDAP?
We provide an implementation of the Registry Data Access Protocol (RDAP) which provides
similar data to the outdated WHOIS protocol, but in a structured format. The
standard is defined in STD 95 and its RFCs:
We provide an implementation of the Registry Data Access Protocol (RDAP) which
provides similar data to the outdated WHOIS protocol, but in a structured
format. The standard is defined in STD 95 and its RFCs:
* [RFC 7480: HTTP Usage in the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc7480)
* [RFC 7480: HTTP Usage in the Registration Data Access Protocol (RDAP)](https://tools.ietf.org/html/rfc7480)
* [RFC 7481: Security Services for the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc7481)
* [RFC 9082: Registration Data Access Protocol (RDAP) Query
Format](https://tools.ietf.org/html/rfc9082)
* [RFC 9083: JSON Responses for the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc9083)
* [RFC 9224: Finding the Authoritative Registration Data (RDAP)
Service](https://tools.ietf.org/html/rfc9224)
* [RFC 9082: Registration Data Access Protocol (RDAP) Query Format](https://tools.ietf.org/html/rfc9082)
* [RFC 9083: JSON Responses for the Registration Data Access Protocol (RDAP)](https://tools.ietf.org/html/rfc9083)
* [RFC 9224: Finding the Authoritative Registration Data (RDAP) Service](https://tools.ietf.org/html/rfc9224)
If you access this endpoint on a running Nomulus system:
`https://{PROJECT-ID}.appspot.com/rdap/domains?name=ex*`
`https://pubapi.{SERVER_URL}/rdap/domains?name=ex*`
it should search for all domains that start with "ex", returning the results in
JSON format. This functionality is still under development, so it is quite
possible that the format of returned data will change over time, but the basic
structure should be the same, as defined by RFCs 7480 through 7484. Request
paths which ought to mostly work (though no guarantees yet):
JSON format. Request paths which ought to work:
```
/rdap/domain/abc.tld
/rdap/nameserver/ns1.abc.tld
/rdap/entity/ROID
/rdap/entity/registrar-iana-identifier
/rdap/domains?name=abc.tld
/rdap/domains?name=abc*
@@ -86,8 +32,6 @@ paths which ought to mostly work (though no guarantees yet):
/rdap/domains?nsIp=1.2.3.4
/rdap/nameservers?name=ns*.abc.tld
/rdap/nameservers?ip=1.2.3.4
/rdap/entities?fn=John*
/rdap/entities?handle=ROI*
/rdap/entities?handle=registrar-iana-identifier
```

View File

@@ -1,37 +0,0 @@
# Developing
This document contains advice on how to do development on the Nomulus codebase,
including how to set up an IDE environment and run tests.
## Running a local development server
`RegistryTestServer` is a lightweight test server for the registry that is
suitable for running locally for development. It uses local versions of all
Google Cloud Platform dependencies, when available. Correspondingly, its
functionality is limited compared to a Nomulus instance running on an actual App
Engine instance. It is most helpful for doing web UI development such as on the
registrar console: it allows you to update JS, CSS, images, and other front-end
resources, and see the changes instantly simply by refreshing the relevant page
in your browser.
To see the registry server's command-line parameters, run:
```shell
$ bazel run //javatests/google/registry/server -- --help
```
To start an instance of the server, run:
```shell
$ bazel run //javatests/google/registry/server {your params}
```
Once it is running, you can interact with it via normal `nomulus` commands, or
view the registrar console in a web browser by navigating to
[http://localhost:8080/registrar](http://localhost:8080/registrar). The server
will continue running until you terminate the process.
If you are adding new URL paths, or new directories of web-accessible resources,
you will need to make the corresponding changes in `RegistryTestServer`. This
class contains all of the routing and static file information used by the local
development server.

View File

@@ -3,8 +3,8 @@
This document covers the first steps of creating some test entities in a newly
deployed and configured testing environment. It isn't required, but it does help
gain familiarity with the system. If you have not already done so, you must
first complete [installation](./install.md) and [initial
configuration](./configuration.md).
first complete [installation](./install.md) and
[initial configuration](./configuration.md).
Note: Do not create these entities on a production environment! All commands
below use the [`nomulus` admin tool](./admin-tool.md) to interact with the
@@ -12,55 +12,30 @@ running registry system. We'll assume that all commands below are running in the
`alpha` environment; if you named your environment differently, then use that
everywhere that `alpha` appears.
## Temporary extra steps
Using the `nomulus` admin tool currently requires two additional steps to enable
full functionality. These steps should _not_ be done for a production
deployment - a suitable solution for production is in progress.
Modify the `tools` module `web.xml` file to remove admin-only restrictions.
Look for the `<auth-constraint>admin</auth-constraint>` element. Comment out
this element, and redeploy the tools module to your live app.
[app-default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
## Create a TLD
Pick the name of a TLD to create. For the purposes of this example we'll use
"example", which conveniently happens to be an ICANN reserved string, meaning
it'll never be created for real on the Internet at large.
it'll never be created for real on the Internet at large. Then,
[create a TLD](operational-procedures/modifying-tlds.md) using the
[example template](https://github.com/google/nomulus/blob/master/core/src/test/resources/google/registry/tools/tld.yaml)
as a guide.
The fields you'll want to change from the template:

* `driveFolderId` should be null
* `roidSuffix` should be `EXAMPLE` -- this is the suffix that will be used
  for repository ids of domains on the TLD. This suffix must be all uppercase
  and a maximum of eight ASCII characters and can be set to the upper-case
  equivalent of our TLD name (if it is 8 characters or fewer), such as
  "EXAMPLE." You can also abbreviate the upper-case TLD name down to 8
  characters. Refer to the
  [gTLD Registry Advisory: Correction of non-compliant ROIDs][roids] for
  further information.
* `tldStr` should be `example`
* `tldType` should be `TEST`, which identifies that the TLD is for testing
  purposes, whereas `REAL` would identify the TLD as a live TLD
```shell
$ nomulus -e alpha create_tld example --roid_suffix EXAMPLE \
--initial_tld_state GENERAL_AVAILABILITY --tld_type TEST \
--dns_writers VoidDnsWriter
[ ... snip confirmation prompt ... ]
Perform this command? (y/N): y
Updated 1 entities.
$ nomulus -e alpha configure_tld --input=example.yaml
```
* `-e` is the environment name (`alpha` in this example).
* `create_tld` is the subcommand to create a TLD. The TLD name is "example"
which happens to be an ICANN reserved string, and therefore "example" can
never be created on the Internet at large.
* `--initial_tld_state` defines the initial state of the TLD.
`GENERAL_AVAILABILITY`, in the case of our example, allows you to
immediately create domain names by bypassing the sunrise and landrush domain
registration periods.
* `--tld_type` is the type of TLD. `TEST` identifies that the TLD is for
testing purposes, where `REAL` identifies the TLD is a live TLD.
* `roid_suffix` is the suffix that will be used for repository ids of domains
on the TLD. This suffix must be all uppercase and a maximum of eight ASCII
characters and can be set to the upper-case equivalent of our TLD name (if
it is 8 characters or fewer), such as "EXAMPLE." You can also abbreviate the
upper-case TLD name down to 8 characters. Refer to the [gTLD Registry
Advisory: Correction of non-compliant ROIDs][roids] for further information.
* `--dns_writers` is the list of DNS writer modules that specify how changes
to domains for the TLD are communicated to actual DNS servers. We use
`VoidDnsWriter` in this case so as to not have to set up DNS. Typically
one might use CloudDnsWriter (for Google Cloud DNS) or implement your own
solution.
## Create a registrar
Now we need to create a registrar and give it access to operate on the example
@@ -96,36 +71,6 @@ Where:
* `--allowed_tlds` is a comma-delimited list of top level domains where this
registrar has access.
## Create a contact
Now we want to create a contact, as a contact is required before a domain can be
created. Contacts can be used on any number of domains across any number of
TLDs, and contain the information on who owns or provides technical support for
a TLD. These details will appear in WHOIS queries.
```shell
$ nomulus -e alpha create_contact -c acme --id abcd1234 \
--name 'John Smith' --street '234 Fake St' --city 'North Fakington' \
--state MA --zip 23456 --cc US --email jsmith@e.mail
[ ... snip EPP response ... ]
```
Where:
* `create_contact` is the subcommand to create a contact.
* `-c` is used to define the registrar. The `-c` option is used with most
`registry_tool` commands to specify the id of the registrar executing the
command. Contact, domain, and host creation all work by constructing an EPP
message that is sent to the registry, and EPP commands need to run under the
context of a registrar. The "acme" registrar that was created above is used
for this purpose.
* `--id` is the contact id, and is referenced elsewhere in the system (e.g.
when a domain is created and the admin contact is specified).
* `--name` is the display name of the contact, which is usually the name of a
company or of a person.
The address and `email` fields are required to create a contact.
## Create a host
Hosts are used to specify the IP addresses (either v4 or v6) that are associated
@@ -156,8 +101,7 @@ To tie it all together, let's create a domain name that uses the above contact
and host.
```shell
$ nomulus -e alpha create_domain fake.example --client acme --admins abcd1234 \
--techs abcd1234 --registrant abcd1234 --nameservers ns1.google.com
$ nomulus -e alpha create_domain fake.example --client acme --nameservers ns1.google.com
[ ... snip EPP response ... ]
```
@@ -166,26 +110,19 @@ Where:
* `create_domain` is the subcommand to create a domain name. It accepts a
whitespace-separated list of domain names to be created
* `--client` is used to define the registrar.
* `--admins` is the administrative contact's id(s).
* `--techs` is the technical contact's id(s).
* `--registrant` is the registrant contact's id.
* `--nameservers` is a comma-separated list of hosts.
Note how the same contact id is used for the administrative, technical, and
registrant contact. It is common for domain names to use the same details for
all contacts on a domain name.
## Verify test entities using RDAP
## Verify test entities using WHOIS
To verify that everything worked, let's query the WHOIS information for
To verify that everything worked, let's query the RDAP information for
fake.example:
```shell
$ nomulus -e alpha whois_query fake.example
[ ... snip WHOIS response ... ]
$ nomulus -e alpha rdap_query fake.example
[ ... snip RDAP response ... ]
```
You should see all of the information in WHOIS that you entered above for the
contact, nameserver, and domain.
You should see all the information in RDAP that you entered above for the
nameserver and domain.
[roids]: https://www.icann.org/resources/pages/correction-non-compliant-roids-2015-08-26-en

View File

@@ -22,15 +22,17 @@ To upgrade to a new Gradle version for this project, use:
gradle wrapper --gradle-version version-number
```
## Deploy to AppEngine
## Deploy to GCP Test Projects
Use the Gradle task 'appengineDeploy' to build and deploy to AppEngine. For now
you must update the appengine.deploy.project in build.gradle to your
GCP project ID.
If your [configuration](configuration.md) is up to date with the proper test
projects configured, you can deploy to GCP through the Gradle command line.
To deploy the Gradle build, you will need the Google Cloud SDK and its
app-engine-java component.
Use the Gradle task `deployNomulus` to build and deploy to a GCP test project
providing the test project as an argument, e.g.
```shell
./gradlew deployNomulus -Penvironment=alpha
```
### Notable Issues
@@ -40,13 +42,8 @@ is easier to exclude the suite classes than individual test classes. This is the
reason why all test tasks in the :core project contain the exclude pattern
'"**/*TestCase.*", "**/*TestSuite.*"'
Many Nomulus tests are not hermetic: they modify global state, but do not clean
Some Nomulus tests are not hermetic: they modify global state, but do not clean
up on completion. This becomes a problem with Gradle. In the beginning we forced
Gradle to run every test class in a new process, and incurred heavy overheads.
Since then, we have fixed some tests, and managed to divide all tests into three
suites that do not have intra-suite conflicts. We will revisit the remaining
tests soon.
Note that it is unclear if all conflicting tests have been identified. More may
be exposed if test execution order changes, e.g., when new tests are added or
execution parallelism level changes.
Since then, we have fixed some tests, and managed to divide all tests into two
suites that do not have intra-suite conflicts (`fragileTest` and `standardTest`)

34
docs/local-testing.md Normal file
View File

@@ -0,0 +1,34 @@
# Local Testing
## Running a local development server
Nomulus provides a `RegistryTestServer` that is a lightweight test server
suitable for running local development. It uses local versions of all Google
Cloud Platform dependencies when available. Correspondingly, it is primarily
useful for doing web UI development (i.e. the registrar console). It allows you
to update Typescript, HTML, and CSS and see the changes simply by refreshing the
relevant page in your browser.
In order to serve content locally, there are two services that must be run:

* the `RegistryTestServer` to serve as the backing server
* the Angular service to provide the UI files
In order to do this in one step, from the `console-webapp` folder, run:
```shell
$ npm install
$ npm run start:dev
```
This will start both the `RegistryTestServer` and the Angular testing service.
Any changes to Typescript/HTML/CSS files will be recompiled and available on
page reload.
Once it is running, you can interact with the console by going to
`http://localhost:4200` to view the registrar console in a web browser. The
server will continue running until you terminate the process.
If you are adding new URL paths, or new directories of web-accessible resources,
you will need to make the corresponding changes in `RegistryTestServer`. This
class contains all the routing and static file information used by the local
development server.

View File

@@ -7,7 +7,7 @@ production registry system.
[Stackdriver Monitoring](https://cloud.google.com/monitoring/docs/) is used to
instrument internal state within the Nomulus internal environment. This is
broadly called white-box monitoring. EPP, DNS, and WHOIS are instrumented. The
broadly called white-box monitoring. EPP, DNS, and RDAP are instrumented. The
metrics monitored are as follows:
* `/custom/dns/publish_domain_requests` -- A count of publish domain requests,
@@ -21,29 +21,27 @@ metrics monitored are as follows:
* `/custom/epp/processing_time` -- A [Distribution][distribution] representing
the processing time for EPP requests, described by command name, client id,
and return status code.
* `/custom/whois/requests` -- A count of WHOIS requests, described by command
* `/custom/rdap/requests` -- A count of RDAP requests, described by command
name, number of returned results, and return status code.
* `/custom/whois/processing_time` -- A [Distribution][distribution]
representing the processing time for WHOIS requests, described by command
* `/custom/rdap/processing_time` -- A [Distribution][distribution]
representing the processing time for RDAP requests, described by command
name, number of returned results, and return status code.
Follow the guide to [set up a Stackdriver
account](https://cloud.google.com/monitoring/accounts/guide) and associate it
with the GCP project containing the Nomulus App Engine app. Once the two have
been linked, monitoring will start automatically. For now, because the
Follow the guide to
[set up a Stackdriver account](https://cloud.google.com/monitoring/accounts/guide)
and associate it with the GCP project containing the Nomulus app. Once the two
have been linked, monitoring will start automatically. For now, because the
visualization of custom metrics in Stackdriver is embryonic, you can retrieve
and visualize the collected metrics with a script, as described in the guide on
[Reading Time
Series](https://cloud.google.com/monitoring/custom-metrics/reading-metrics) and
the [custom metric code
sample](https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/monitoring/api/v3/custom_metric.py).
[Reading Time Series](https://cloud.google.com/monitoring/custom-metrics/reading-metrics)
and the
[custom metric code sample](https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/monitoring/api/v3/custom_metric.py).
In addition to the included white-box monitoring, black-box monitoring should be
set up to exercise the functionality of the registry platform as a user would
see it. This monitoring should, for example, create a new domain name every few
minutes via EPP and then verify that the domain exists in DNS and WHOIS. For
now, no black-box monitoring implementation is provided with the Nomulus
platform.
minutes via EPP and then verify that the domain exists in DNS and RDAP. For now,
no black-box monitoring implementation is provided with the Nomulus platform.
## Updating cursors
@@ -80,11 +78,11 @@ scripts.
[RDE](https://newgtlds.icann.org/en/applicants/data-escrow) is a daily deposit
of the contents of the registry, sent to a third-party escrow provider. The
details are contained in Specification 2 of the [registry
agreement][registry-agreement].
details are contained in Specification 2 of the
[registry agreement][registry-agreement].
Nomulus provides [code to generate and send these
deposits](./operational-procedures/rde-deposits.md).
Nomulus provides
[code to generate and send these deposits](./operational-procedures/rde-deposits.md).
### Monthly registry activity and transaction reporting
@@ -92,8 +90,8 @@ ICANN requires monthly activity and transaction reporting. The details are
contained in Specification 3 of the [registry agreement][registry-agreement].
These reports are mostly generated by querying the Cloud SQL database. There is
currently a Google proprietary class to query DNS related activities that is
not included in the open source Nomulus release.
currently a Google proprietary class to query DNS related activities that is not
included in the open source Nomulus release.
### Zone File Access (ZFA)
@@ -102,27 +100,29 @@ information. The details are contained in part 2 of Specification 4 of the
[registry agreement][registry-agreement].
This information will come from the DNS server, rather than Nomulus itself, so
ZFA is not part of the Nomulus release.
ZFA is not directly part of the Nomulus release.
### Bulk Registration Data Access (BRDA)
BRDA is a weekly archive of the contents of the registry. The details are
contained in part 3 of Specification 4 of the [registry
agreement][registry-agreement].
contained in part 3 of Specification 4 of the
[registry agreement][registry-agreement].
ICANN uses sFTP to retrieve BRDA data from a server provided by the registry.
Nomulus provides [code to generate these
deposits](./operational-procedures/brda-deposits.md), but a separate sFTP server
must be configured, and the deposits must be moved onto the server for access by
ICANN.
Nomulus provides
[code to generate these deposits](./operational-procedures/brda-deposits.md),
but a separate sFTP server must be configured, and the deposits must be moved
onto the server for access by ICANN.
### Spec 11 reporting
[Spec 11][spec-11] reporting must be provided to ICANN as part of their
anti-abuse efforts. This is covered in Specification 11 of the
[registry agreement][registry-agreement], but the details are a little spotty.
The Nomulus release does not generate these reports.
Nomulus provides
[code](https://github.com/google/nomulus/blob/master/core/src/main/java/google/registry/beam/spec11/Spec11Pipeline.java)
to generate and send these reports, run on
[a schedule](https://github.com/google/nomulus/blob/master/core/src/main/java/google/registry/config/files/tasks/cloud-scheduler-tasks-production.xml#L257-L267)
[distribution]: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TypedValue#Distribution
[registry-agreement]: https://newgtlds.icann.org/sites/default/files/agreements/agreement-approved-09jan14-en.pdf

View File

@@ -28,8 +28,8 @@ The BRDA copy task reads the previous file and creates two files:
```
If you see an `xml.ghostryde` file but not the others, an error has occurred
during the process. If you see the files in the
{PROJECT-ID}-icann-brda bucket as well, the process has completed successfully.
during the process. If you see the files in the {PROJECT-ID}-icann-brda bucket
as well, the process has completed successfully.
Once the files have been created, they must be stored on an sFTP server from
which ICANN can pull the files. The Nomulus project does not provide this last
@@ -40,11 +40,11 @@ The cursor can be checked using the `nomulus pending_escrow` command.
## Generating BRDA deposits manually
* Get a list of "REAL" (as opposed to TEST) TLDs. Doublecheck that the command
output doesn't contain any TLDs for tests.
* Get a list of "REAL" (as opposed to TEST) TLDs. Double-check that the
command output doesn't contain any TLDs for tests.
```shell
$ registry-tool -e production list_tlds --fields=tldStr,tldType | grep REAL | awk '{print $1}' > realtlds.txt
$ nomulus -e production list_tlds --fields=tldStr,tldType | grep REAL | awk '{print $1}' > realtlds.txt
```
* Generate .ryde and .sig files of TLDs specified for given date(s) in the

View File

@@ -0,0 +1,26 @@
# Creating or Modifying TLDs
Nomulus stores YAML representations of TLDs, in an effort to make sure that any
(potentially significant) modifications to TLDs go through source control and
code review. We recommend storing these TLD YAML representations in a separate
private repository so that changes can be verified by multiple people before
being merged
([here is an example TLD](https://github.com/google/nomulus/blob/master/core/src/test/resources/google/registry/tools/tld.yaml)).
Creating and updating a TLD use the same process -- the only difference is
whether you're creating a TLD YAML file from scratch or modifying an existing
one.
Similar to [premium lists](premium-list-management.md) and
[reserved lists](reserved-list-management.md), we recommend modifying TLDs as a
part of an automated build process after the desired changes have been merged
into the TLD YAML files. The automated process should run:
```shell
nomulus -e {ENVIRONMENT} configure_tld --build_environment --input=path/to/my/file/tld.yaml
```
The `build_environment` flag signals that this is being run as part of an
automated build process and should ideally not be used manually. There is an
additional `--break_glass` argument that can be used in emergencies to modify
TLDs outside a normal build process.

View File

@@ -69,6 +69,18 @@ Perform this command? (y/N): y
Successfully saved premium list exampletld
```
### Note:
We recommend only updating premium lists manually in the case of emergencies.
Instead, we run the `update_premium_list` command (as well as `configure_tld`
and `update_reserved_list` commands) as part of the build process after a pull
request has been merged into the private source code repository that contains
the files. The `--build_environment` flag is used to signal that the command is
being run in one of those automated environments, and thus allowed to modify
production. Without that flag, commands against production will fail.
This is similar to the process for [updating TLDs](modifying-tlds.md).
If this premium list is already applied to a TLD, then changes will take up to
60 minutes to take effect (depending on how you've configured the relevant
caching interval; 60 minutes is the default).
@@ -80,16 +92,15 @@ premium list must first be applied to a TLD before it will take effect. You will
only need to do this when first creating a premium list; once it has been
applied, it stays applied, and updates to the list are effective automatically.
Note that each TLD can have no more than one premium list applied to it. To
apply a premium list to a TLD, run the `update_tld` command with the following
parameter:
apply a premium list to a TLD,
[update the TLD to set the premium list](modifying-tlds.md):
```shell
$ nomulus -e {ENVIRONMENT} update_tld exampletld --premium_list exampletld
Update Registry@exampletld
premiumList: null -> Key<?>(EntityGroupRoot("cross-tld")/PremiumList("exampletld"))
Perform this command? (y/N): y
Updated 1 entities.
...
pendingDeleteLength: "PT432000S"
premiumListName: "test"
pricingEngineClassName: "google.registry.model.pricing.StaticPremiumListPricingEngine"
...
```
## Checking which premium list is applied to a TLD
@@ -100,7 +111,7 @@ all other information about a TLD). It is used as follows:
```shell
$ nomulus -e {ENVIRONMENT} get_tld exampletld
[ ... snip output ... ]
premiumList=Key<?>(EntityGroupRoot("cross-tld")/PremiumList("exampletld"))
premiumListName: "test"
[ ... snip output ... ]
```
@@ -127,10 +138,10 @@ $ nomulus -e production check_domain {domain_name}
[ ... snip output ... ]
```
**Note that the list can be cached for up to 60 minutes, so the old value may
**Note that the list can be cached for up to 60 minutes, so the old value may
still be returned for a little while**. If it is urgent that the new pricing
changes be applied, and it's OK to potentially interrupt client connections,
then you can use the App Engine web console to kill instances of the `default`
then you can use the GCP web console to kill instances of the `frontend`
service, as the cache is per-instance. Once you've killed all the existing
instances (don't kill them all at once!), all of the newly spun up instances
will now be using the new values you've configured.
instances (don't kill them all at once!), all the newly spun up instances will
now be using the new values you've configured.

View File

@@ -16,9 +16,9 @@ phases:
3. [Report](https://github.com/google/nomulus/blob/master/java/google/registry/rde/RdeReportAction.java):
Transmit XML *report* file to ICANN via HTTPS.
Each phase happens with an App Engine task queue entry that retries on failure.
When each task succeeds, it automatically enqueues a task for the next phase in
the process. The staging files are stored in Google Cloud Storage indefinitely,
Each phase happens with a GCP task queue entry that retries on failure. When
each task succeeds, it automatically enqueues a task for the next phase in the
process. The staging files are stored in Google Cloud Storage indefinitely,
encrypted with the GhostRyDE container format.
Note that in order for the automated RDE processing to work correctly, you will
@@ -99,9 +99,10 @@ that no cooldown period is necessary.
## Listing deposits in Cloud Storage
You can list the files in Cloud Storage for a given TLD using the gcloud storage tool.
All files are stored in the {PROJECT-ID}-rde bucket, where {PROJECT-ID} is the
name of the App Engine project for the particular environment you are checking.
You can list the files in Cloud Storage for a given TLD using the gcloud storage
tool. All files are stored in the {PROJECT-ID}-rde bucket, where {PROJECT-ID} is
the name of the App Engine project for the particular environment you are
checking.
```shell
$ gcloud storage ls gs://{PROJECT-ID}-rde/zip_2015-05-16*
@@ -116,10 +117,12 @@ Under normal circumstances, RDE is launched by TldFanoutAction, configured in
cron.xml. If the App Engine's cron executor isn't working, you can spawn it
manually by visiting the following URL:
https://backend-dot-{PROJECT-ID}.appspot.com/_dr/task/rdeStaging
```
https://backend.mydomain.com/_dr/task/rdeStaging
```
That will spawn a staging task for each TLD under the backend module in that App
Engine project. You can also run the task from the cron tab of the GAE console.
That will spawn a staging task for each TLD under the backend module in that GCP
project. You can also run the task from the GCP Cloud Scheduler UI.
## Notification of upload problems
@@ -157,7 +160,7 @@ space the uploading at least two hours apart.
Note that this warning only applies when you (re)upload files directly to the
sFTP server. There's an RDE_UPLOAD_SFTP cursor that prevents the production
system from uploading twice in a two hour window, so when you let the production
system from uploading twice in a two-hour window, so when you let the production
job upload missing deposits, it will be safe. Therefore, one safe approach is to
reset the cursor, then kick off the production job manually.
@@ -172,7 +175,7 @@ $ gcloud storage cat gs://{PROJECT-ID}-rde/foo.ghostryde | nomulus -e production
## Identifying which phase of the process failed
Analyze the GAE logs on the backend module.
Analyze the GCP logs on the backend module.
If the rdeStaging task failed, then it's likely the files do not exist in cloud
storage.
@@ -308,8 +311,8 @@ sftp> put ${tld}_2015-05-16_full_S1_R0.sig
It would be convenient to have the following in your `~/.ssh/config` file and
store the SSH private key that you stored in `rde-ssh-client-private` as
`~/.ssh/id_rsa_rde` so that you can simply run `$ sftp rde` to connect to
the sFTP server.
`~/.ssh/id_rsa_rde` so that you can simply run `$ sftp rde` to connect to the
sFTP server.
```
Host rde

View File

@@ -5,36 +5,26 @@ for various reasons, usually because of potential abuse.
## Reserved list file format
Reserved lists are handled in a similar way to [premium
lists](./premium-list-management.md), except that instead of each label having
a price, it has a reservation type. The valid values for reservation types are:
Reserved lists are handled in a similar way to
[premium lists](./premium-list-management.md), except that instead of each label
having a price, it has a reservation type. The valid values for reservation
types are:
* **`NAMESERVER_RESTRICTED`** - Only nameservers included here can be set on a
domain with this label. If a label of this type exists on multiple
reserved lists that are applied to the same TLD, the set of allowed
nameservers for that label in that TLD is the intersection of all applicable
nameservers. Note that this restriction is orthogonal to the TLD-wide
nameserver restrictions that may be otherwise imposed. The ultimate set of
allowed nameservers for a certain domain is the intersection of per-domain
and TLD-wide allowed nameservers set. Furthermore, a TLD can be set in a
domain create restricted mode, in which case **only** domains that are
reserved with this type can be registered.
* **`ALLOWED_IN_SUNRISE`** - The label can be registered during the sunrise
period by a registrant with a valid claim but it is reserved thereafter.
* **`RESERVED_FOR_SPECIFIC_USE`** - The label is reserved for the use of a
specific registrant, and can only be registered by someone sending along the
allocation token at time of registration. This token is configured on an
`AllocationToken` entity with a matching `domainName`, and is sent by the
registrar using the [allocation token EPP
extension](https://tools.ietf.org/id/draft-ietf-regext-allocation-token-07.html).
registrar using the
[allocation token EPP extension](https://tools.ietf.org/id/draft-ietf-regext-allocation-token-07.html).
* **`RESERVED_FOR_ANCHOR_TENANT`** - Like `RESERVED_FOR_SPECIFIC_USE`, except
for an anchor tenant (i.e. a registrant participating in a [Qualified Launch
Program](https://newgtlds.icann.org/en/announcements-and-media/announcement-10apr14-en)),
for an anchor tenant (i.e. a registrant participating in a
[Qualified Launch Program](https://newgtlds.icann.org/en/announcements-and-media/announcement-10apr14-en)),
meaning that registrations can occur during sunrise ahead of GA, and must be
for a two year term.
* **`NAME_COLLISION`** - The label is reserved because it is on an [ICANN
collision
list](https://www.icann.org/resources/pages/name-collision-2013-12-06-en).
* **`NAME_COLLISION`** - The label is reserved because it is on an
[ICANN collision list](https://www.icann.org/resources/pages/name-collision-2013-12-06-en).
It may be registered during sunrise by a registrant with a valid claim but
is reserved thereafter. The `SERVER_HOLD` status is automatically applied
upon registration, which will prevent the domain name from ever resolving in
@@ -53,16 +43,13 @@ label is reserved due to name collision (with message "Cannot be delegated"). In
general `FULLY_BLOCKED` is by far the most widely used reservation type for
typical TLD use cases.
Here's an example of a small reserved list. Note that the
`NAMESERVER_RESTRICTED` label has a third entry, a colon separated list of
nameservers that the label can be delegated to:
Here's an example of a small reserved list:
```
reserveddomain,FULLY_BLOCKED
availableinga,ALLOWED_IN_SUNRISE
fourletterword,FULLY_BLOCKED
acmecorp,RESERVED_FOR_ANCHOR_TENANT
internaldomain,NAMESERVER_RESTRICTED,ns1.internal.tld:ns1.internal.tld
```
# Reserved list file name format
@@ -96,8 +83,8 @@ Updated 1 entities.
Note that `-i` is the input file containing the list. You can optionally specify
the name of the reserved list using `-n`, but when it's omitted as above the
list name is inferred from the name of the filename (minus the file extension).
For ease of tracking track of things, it is recommended to store all lists such
that the filename and list name are identical.
For ease of tracking, it is recommended to store all lists such that the
filename and list name are identical.
You're not done yet! After creating the reserved list you must then apply it to
one or more TLDs (see below) for it to actually be used.
@@ -127,22 +114,16 @@ reserved lists applied. The list of reserved labels for a TLD is the union of
all applied reserved lists, using the precedence rules described earlier when a
label appears in more than one list.
To add a reserved list to a TLD, run the `update_tld` command with the following
parameter:
To add a reserved list to a TLD, [update the TLD](modifying-tlds.md):
```shell
$ nomulus -e {ENVIRONMENT} update_tld exampletld \
--add_reserved_lists common_bad-words
Update Registry@exampletld
reservedLists: null -> [Key<?>(EntityGroupRoot("cross-tld")/ReservedList("common_bad-words"))]
Perform this command? (y/N): y
Updated 1 entities.
...
reservedListNames:
- "common_bad-words"
- "exampletld_specialized-reservations"
...
```
The `--add_reserved_lists` parameter can take a comma-delimited list of reserved
list names if you are applying multiple reserved lists to a TLD. There is also a
`--remove_reserved_lists` parameter that functions as you might expect.
Naming rules are enforced: reserved lists that start with `common_` can be
applied to any TLD (though they don't automatically apply to all TLDs), whereas
reserved lists that start with the name of a TLD can only be applied to the TLD
@@ -156,9 +137,11 @@ purposes here. It is used as follows:
```shell
$ nomulus -e {ENVIRONMENT} get_tld exampletld
[ ... snip output ... ]
reservedLists=[Key<?>(EntityGroupRoot("cross-tld")/ReservedList("common_bad-words"))]
[ ... snip output ... ]
...
reservedListNames:
- "common_bad-words"
- "exampletld_specialized-reservations"
...
```
## Listing all available reserved lists
@@ -184,10 +167,10 @@ $ nomulus -e production check_domain {domain_name}
[ ... snip output ... ]
```
**Note that the list can be cached for up to 60 minutes, so changes may not
take place immediately**. If it is urgent that the new changes be applied, and
it's OK to potentially interrupt client connections, then you can use the App
Engine web console to kill instances of the `default` service, as the cache is
**Note that the list can be cached for up to 60 minutes, so changes may not take
place immediately**. If it is urgent that the new changes be applied, and it's
OK to potentially interrupt client connections, then you can use the GCP web
console to kill instances of the `frontend` service, as the cache is
per-instance. Once you've killed all the existing instances (don't kill them all
at once!), all of the newly spun up instances will now be using the new values
at once!), all the newly spun up instances will now be using the new values
you've configured.

View File

@@ -1,119 +1,35 @@
# TLD security restrictions
Nomulus has several security features that allow registries to impose additional
restrictions on which domains are allowed on a TLD and what
registrant/nameservers they can have. The restrictions can be applied to an
entire TLD or on a per-domain basis. These restrictions are intended for use on
closed TLDs that need to allow external registrars, and prevent undesired domain
registrations or updates from occurring, e.g. if a registrar makes an error or
is compromised. For closed TLDs that do not need external registrars, a simpler
solution is to not grant any registrars access to the TLD.
restrictions on which domains are allowed on a TLD and what nameservers they can
have. The restrictions can be applied to an entire TLD or on a per-domain basis.
These restrictions are intended for use on closed TLDs that need to allow
external registrars, and prevent undesired domain registrations or updates from
occurring, e.g. if a registrar makes an error or is compromised. For closed TLDs
that do not need external registrars, a simpler solution is to not grant any
registrars access to the TLD.
This document outlines the various restrictions available, their use cases, and
how to apply them.
## TLD-wide nameserver/registrant restrictions
Nomulus allows registry administrators to set registrant contact and/or
nameserver restrictions on a TLD. This is typically desired for brand TLDs on
which all domains are either self-hosted or restricted to a small set of
webhosts.
Nomulus allows registry administrators to set nameserver restrictions on a TLD.
This is typically desired for brand TLDs on which all domains are either
self-hosted or restricted to a small set of webhosts.
To configure allowed nameservers on a TLD, use the
`--allowed_nameservers`, `--add_allowed_nameservers`, and
`--remove_allowed_nameservers` parameters on the `update_tld` command as
follows:
```shell
$ nomulus -e {ENVIRONMENT} update_tld --allowed_nameservers {NS1,NS2,...} {TLD}
```
Note that `--allowed_nameservers` can also be used with the `create_tld` command
when the TLD is initially created.
To set the allowed registrants, use the analogous `--allowed_registrants`,
`--add_allowed_registrants`, and `--remove_allowed_registrants` parameters:
```shell
$ nomulus -e {ENVIRONMENT} update_tld \
--allowed_registrants {CONTACTID1,CONTACTID2,...} {TLD}
```
When nameserver or registrant restrictions are set on a TLD, any domain mutation
flow under that TLD will verify that the supplied nameservers or registrants
are not empty and that they are a strict subset of the allowed nameservers and
registrants on the TLD. If no restrictions are set, domains can be created or
updated without nameservers, but registrant is still always required.
## Per-domain nameserver restrictions
Registries can also elect to impose per-domain nameserver restrictions. This
restriction is orthogonal to the TLD-wide nameserver restriction detailed above.
Any domain mutation must pass both validations (if applicable). In practice, it
is recommended to maintain consistency between the two types of lists by making
the per-domain allowed nameserver list a subset of the TLD-wide one, because any
nameservers that are not included in both lists are effectively disallowed.
The per-domain allowed nameserver lists are configured in [reserved
list](./reserved-list-management.md) entries with the reservation type
`NAMESERVER_RESTRICTED`. The final element in the entry is the colon-delimited
list of nameservers, e.g.:
To [configure allowed nameservers on a TLD](modifying-tlds.md), use the
`allowedFullyQualifiedHostNames` field in the TLD YAML file:
```
restrictedsld,NAMESERVER_RESTRICTED,ns1.mycompany.tld:ns2.mycompany.tld
addGracePeriodLength: "PT432000S"
allowedFullyQualifiedHostNames:
- "ns1.test.goog"
- "ns2.test.goog"
- "ns3.test.goog"
```
Note that multiple reserved lists can be applied to a TLD. If different reserved
lists contain nameserver restrictions for the same label, then the resulting
restriction set is the set intersection of all allowed nameserver lists for that
label.
## Domain create restriction on closed TLDs
Nomulus offers the ability to "lock-down" a TLD so that domain registration is
forbidden except for allow-listed domain names. This is achieved by setting the
"domain create restricted" option on the TLD using the `nomulus` tool. Domains
are allow-listed for registration by adding them to reserved lists with entries
of type `NAMESERVER_RESTRICTED`. Each domain will thus also need to have
explicitly allowed nameservers configured in its reserved list entry, per the
previous section.
To apply domain create restriction when creating/updating a TLD, use the
`--domain_create_restricted` parameter as follows:
```shell
$ nomulus -e {ENVIRONMENT} [create_tld | update_tld] \
--domain_create_restricted [true | false] {TLD}
```
Note that you do **not** have to set a TLD-wide allowed nameservers list with
this option, because it operates independently from the per-domain nameservers
restriction that `NAMESERVER_RESTRICTED` reservation imposes.
In addition to disabling registration of non-allow-listed domains, setting a TLD
as domain create restricted also applies the `SERVER_UPDATE_PROHIBITED` and
`SERVER_TRANSFER_PROHIBITED` statuses to domains upon creation. Any domains on a
domain create restricted TLD are therefore virtually immutable, and must be
unlocked by the registry operator before each change can be made. For more
information on these EPP statuses, see [RFC
5731](https://tools.ietf.org/html/rfc5731#section-2.3).
To unlock a locked domain so that a registrar can make changes, the registry
operator must remove the status using a `nomulus` tool command as follows:
```shell
$ nomulus -e {ENVIRONMENT} update_server_locks \
--remove SERVER_UPDATE_PROHIBITED,SERVER_TRANSFER_PROHIBITED \
--client {REGISTRAR_CLIENT_ID}
--n {DOMAIN}
```
Note that these statuses will be reapplied immediately after any transfer/update
so long as the TLD is still set to domain create restricted.
Since the domain create restricted facility is intended for use on closed TLDs,
validation/server lock does not happen in domain application and allocate flows.
Most closed TLDs do not have a sunrise period, so this is fine, but for the
unanticipated occasion that a sunrise period is necessary, it suffices to
manually ensure that all domains are correct immediately after entering general
availability, after which no additional disallowed changes can be made.
When nameserver restrictions are set on a TLD, any domain mutation flow under
that TLD will verify that the supplied nameservers are not empty and that they
are a strict subset of the allowed nameservers on the TLD. If no restrictions
are set, domains can be created or updated without nameservers.

View File

@@ -1,18 +1,18 @@
# RDAP user's guide
[RDAP](https://www.icann.org/rdap) is a JSON REST protocol, served over HTTPS,
for retrieving registry information. It returns data similar to the WHOIS
service, but in a JSON-structured format. This document describes the Nomulus
system's support for the RDAP protocol.
for retrieving registry information. It returns data similar to the
previously-existing WHOIS service, but in a JSON-structured format. This
document describes the Nomulus system's support for the RDAP protocol.
## Quick example <a id="quick_example"></a>
RDAP information is available via regular Web queries. For example, if your App
Engine project ID is `project-id`, and `tld` is a TLD managed by that instance
of Nomulus, enter the following in a Web browser:
RDAP information is available via regular Web queries. For example, if your base
domain is `mydomain.com`, and `tld` is a TLD managed by that instance of
Nomulus, enter the following in a Web browser:
```
https://project-id.appspot.com/rdap/domains?name=*.tld
https://pubapi.mydomain.com/rdap/domains?name=*.tld
```
You should get back a long string of apparent JSON gobbledygook, listing the
@@ -24,34 +24,21 @@ the response in an expandable tree format.
## Introduction to RDAP <a id="introduction"></a>
RDAP is a next-generation protocol for dissemination of registry data. It is
eventually intended to replace the WHOIS protocol. RDAP was defined in 2015 in a
series of RFCs:
RDAP is a next-generation protocol for dissemination of registry data. It has
replaced the WHOIS protocol. RDAP was defined as STD 95 as a series of RFCs:
* [RFC 7480: HTTP Usage in the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc7480)
* [RFC 7480: HTTP Usage in the Registration Data Access Protocol (RDAP)](https://tools.ietf.org/html/rfc7480)
* [RFC 7481: Security Services for the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc7481)
* [RFC 7482: Registration Data Access Protocol (RDAP) Query
Format](https://tools.ietf.org/html/rfc7482)
* [RFC 7483: JSON Responses for the Registration Data Access Protocol
(RDAP)](https://tools.ietf.org/html/rfc7483)
* [RFC 7484: Finding the Authoritative Registration Data (RDAP)
Service](https://tools.ietf.org/html/rfc7484)
* [RFC 9082: Registration Data Access Protocol (RDAP) Query Format](https://tools.ietf.org/html/rfc9082)
* [RFC 9083: JSON Responses for the Registration Data Access Protocol (RDAP)](https://tools.ietf.org/html/rfc9083)
* [RFC 9224: Finding the Authoritative Registration Data (RDAP) Service](https://tools.ietf.org/html/rfc9224)
Using RDAP, users can send in standard HTTP requests with specific URLs (such as
`.../rdap/domain/example.com` to get information about the domain `example.com`,
or `.../rdap/domains?name=ex*.com` to get information about domains beginning
with `ex` and having the TLD `.com`), and receive back a JSON response
containing the requested data. The data is more or less the same information
that WHOIS provides today, but formatted in a more standardized and
machine-readable manner.
ICANN is sponsoring a one-year [RDAP Pilot
Program](https://www.icann.org/news/announcement-2017-09-05-en), allowing
registries to implement RDAP and, if desired, make modifications to the protocol
to support desired extra features. Nomulus is participating in this program. The
experimental extra features supported by Nomulus are described later.
containing the requested data.
## Nomulus RDAP request endpoints <a id="endpoints"></a>
@@ -60,7 +47,7 @@ the usual App Engine server name. For example, if the App Engine project ID is
`project-id`, the full path for a domain lookup of domain iam.soy would be:
```
https://project-id.appspot.com/rdap/domain/iam.soy
https://pubapi.mydomain.com/rdap/domain/iam.soy
```
The search endpoints (those with a query string) can return more than one match.
@@ -92,8 +79,6 @@ used; Nomulus' wildcard rules are described below.
A maximum of 100 domains will be returned in response to a single query. If more
than one domain is returned, only data about the domain itself is included. If a
single domain is returned, associated nameservers and contacts are also returned
(except that requests not authenticated as associated with the registrar of the
domain will not see contact information).
#### Search by domain name
@@ -160,34 +145,28 @@ Wildcards are not supported for IP address lookup.
### /rdap/entity/ <a id="entity"></a>
Look up a single entity by name. The entity ID is specified at the end of the
path. Two types of entities can be looked up: registrars (looked up by IANA
registrar ID) and contacts (lookup up by ROID). Registrar contacts are also
returned in results as entities, but cannot be looked up by themselves; they
only appear as part of information about a registrar.
path. Because the registry does not store contact data, only registrar entities
may be looked up (by IANA registrar ID). Registrar contacts are also returned in
results as entities, but cannot be looked up by themselves; they only appear as
part of information about a registrar.
```
/rdap/entity/registrar-id
/rdap/entity/ROID
```
### /rdap/entities? <a id="entities"></a>
Search for one or more entities (registrars or contacts). The RDAP specification
supports two kinds of searches: by full name or by handle. In either case,
requests not authenticated as associated with the registrar owning a contact
will not see personal data (name, address, email, phone, etc.) for the contact.
The visibility of registrar information, including registrar contacts, is
determined by the registrar's chosen WHOIS visibility settings.
Search for one or more registrars. The RDAP specification supports two kinds of
searches: by full name or by handle. The visibility of registrar information,
including registrar contacts, is determined by the registrar's chosen WHOIS/RDAP
visibility settings.
#### Search by full name
Entity searches by full name use the `fn` query parameter. Results can include
contacts with a matching name, registrars with a matching registrar name, or
both. For contacts, the name used is the internationalized postal info name, if
present, or the localized postal info name otherwise.
Entity searches by full name use the `fn` query parameter, matching the
registrar name.
```
/rdap/entities?fn=Joseph%20Smith
/rdap/entities?fn=tucows
```
@@ -195,27 +174,16 @@ A trailing wildcard is allowed, but at least two characters must precede the
wildcard.
```
/rdap/entities?fn=Bobby%20Joe*
/rdap/entities?fn=tu*
```
#### Search by handle
Entity searches by handle use the `handle` query parameter. Results can include
contacts with a matching ROID, registrars with a matching IANA registrar number,
or both.
Entity searches by handle use the `handle` query parameter. Results will include
registrars with a matching IANA registrar number.
```
/rdap/entities?handle=12
/rdap/entities?handle=ROID-1234
```
A trailing wildcard is allowed, with at least two characters preceding it.
However, wildcard matching is only performed for contacts. Registrars will never
be returned for a wildcard entity search.
```
/rdap/entities?handle=ROID-12*
```
### /rdap/help/ <a id="help"></a>
@@ -267,34 +235,23 @@ the RDAP endpoints.
The RDAP RFCs do not include support for authentication or access controls. We
have implemented an experimental version that allows for authenticated access to
sensitive data such as contact names and addresses (a longtime concern with
WHOIS). We do this by leveraging the existing authentication/authorization
functionality of Nomulus' registrar console. Requests which can be authenticated
as coming from a specific registrar have access to all information about that
registrar's contact. Requests authenticated as coming from administrators of the
App Engine project have access to all contact information. In other cases, the
sensitive data will be hidden, and only the contact ROIDs and roles will be
displayed.
The registrar console uses the App Engine Users API to authenticate users. When
a request comes in, App Engine attempts to authenticate the user's email
address. If authentication is successful, the email address is then checked
against all registrar contacts in the system. If Nomulus is able to find a
matching registrar contact which has the `allow_console_access` permission, the
request is authorized for the associated registrar.
sensitive data such as information about deleted domains. We do this by
leveraging the existing authentication/authorization functionality of Nomulus'
registrar console. Requests which can be authenticated as coming from a specific
registrar have access to all information about that registrar's contact.
Requests authenticated as coming from administrators of the GCP project have
access to all information. In other cases, the sensitive data will be hidden.
RDAP uses the same logic, but the registrar association is used only to
determine whether the request can see sensitive contact information (and deleted
items, as described in the section about the `includeDeleted` parameter).
Unauthenticated requests can still retrieve data, but that data will not be
visible.
determine whether the request can see deleted items. Unauthenticated requests
can still retrieve data, but deleted items will not be visible.
To use RDAP in an authenticated fashion, first set up your email address for use
in the registrar console, as described elsewhere. Then check that you have
access to the console by loading the page:
```
https://project-id.appspot.com/registrar
https://mydomain.com/console
```
If you can see the registrar console, you are logged in correctly. Then change
@@ -303,9 +260,9 @@ see all data for your associated registrar.
### `registrar` parameter <a id="registrar_parameter"></a>
Ordinarily, all matching domains, hosts and contacts are included in the
returned result set. A request can specify that only items owned by a specific
registrar be included, by adding an extra parameter:
Ordinarily, all matching domains and hosts are included in the returned result
set. A request can specify that only items owned by a specific registrar be
included, by adding an extra parameter:
```
/rdap/domains?name=*.tld&registrar=registrar-client-string
@@ -317,9 +274,9 @@ to be shown that otherwise would not.
### `includeDeleted` parameter <a id="includedeleted_parameter"></a>
Ordinarily, deleted domains, hosts and contacts are not included in search
results. Authorized requests can specify that deleted items be included, by
adding an extra parameter:
Ordinarily, deleted domains and hosts are not included in search results.
Authorized requests can specify that deleted items be included, by adding an
extra parameter:
```
/rdap/domains?name=*.tld&includeDeleted=true
@@ -341,14 +298,6 @@ formatted version can be requested by adding an extra parameter:
The result is still valid JSON, but with extra whitespace added to align the
data on the page.
### `subtype` parameter <a id="subtype_parameter"></a>
The subtype parameter is used only for entity searches, to select whether the
results should include contacts, registrars or both. If specified, the subtype
should be 'all', 'contacts' or 'registrars'. Setting the subtype to 'all'
duplicates the normal behavior of returning both. Setting it to 'contacts' or
'registrars' causes an entity search to return only contacts or only registrars.
### Next page links <a id="next_page_links"></a>
The number of results returned in a domain, nameserver or entity search is
@@ -384,7 +333,7 @@ truncated.
{
"type" : "application/rdap+json",
"href" :
"https://ex.com/rdap/domains?name=abc*.tld&cursor=a5927CDb902wE=",
"https://pubapi.mydomain.com/rdap/domains?name=abc*.tld&cursor=a5927CDb902wE=",
"rel" : "next"
}
],
@@ -392,9 +341,3 @@ truncated.
},
...
```
### Additional features
We anticipate adding additional features during the pilot program, such as the
ability to page through search results. We will update the documentation when
these features are implemented.

View File

@@ -3,52 +3,87 @@
This document contains some of the questions that registrars are likely to ask
about the system. In most cases, the question can be answered generally for
Nomulus systems. In some cases, the details depend on things outside the scope
of Nomulus, such as EPP and WHOIS proxies and DNS systems.
of Nomulus, such as EPP proxies and DNS systems.
## Technical onboarding
**1.1 Do you provide a web based interface for registrars to manage domain names
**1.1 Do you provide documentation for OT&E?**
Yes, a document titled, “OT&E Instructions” is included as part of the
onboarding package. It explains what the test cases are, how to start a test,
and how to conduct a test.
**1.2 Do you offer a second OT&E account available to test domain transfers?**
Yes, we will be offering sandbox credentials for a test TLD that can be used to
test domain transfers.
**1.3 Do you provide a web based interface for registrars to manage domain names
(URL)?**
There is no web based interface to manage domains at this time; registrars must
use EPP commands.
There is no web based interface to manage domains at this time; registrars must
be EPP integrated.
**1.2 Will a Registrar Tool Kit be made available? If yes, what is included in
There is, however, a support console available only to registrars with
production EPP access (who have passed OT&E) which provides registrars with
basic self-service configuration management of their account with Google
Registry.
**1.4 Will a Registrar Tool Kit be made available? If yes, what is included in
the Tool Kit?**
Registry has provided this technical FAQ documentation on how to use the core
EPP protocols and supported extensions. An EPP SDK is not provided. The
registrar can develop EPP client using any existing EPP client libraries (e.g.
Verisign, Afilias, or any Open Source variants).
Google Registry has provided this technical FAQ documentation on how to use the
core EPP protocols and supported extensions. Google Registry will not provide an
EPP SDK. Registrar can develop EPP client using any existing EPP client
libraries (e.g. Verisign, Afilias, or any Open Source variants).
**1.3 Does the registry provide a secure certificate? Are multiple
certifications required across gTLDs?**
**1.5 Do I have to complete OT&E again for each new TLD launched by Google
Registry?**
You will only need to complete OT&E once. Any new functionality released with
new TLD launches will be communicated by email, and updated technical material
will be provided. We highly recommend that you test any new functionality in our
Sandbox. If you want to conduct a full test, or only test specific sections
related to new features, please contact technical support.
**1.6 Do you provide reports about the domains under management?**
Yes, Google Registry will provide a detailed activity report accompanying every
monthly invoice via Google Drive. For reports about domains under management,
please request through our technical support.
**1.7 How long is the absolute timeout for a registry connection?**
An idle connection will be disconnected after 60 minutes.
**1.8 Will the registry provide a secure certificate? Will multiple
certifications be required across gTLDs?**
*[ The answer to this question depends on the details of your EPP proxy
implementation. Here is how we answer it: ]*
The registry does not provide a secure certificate. Registrars must provide
their own certificate during onboarding, which will be allow-listed for the
The registry will not provide a secure certificate. Registrars must provide
their own certificate during onboarding, which will be allowlisted for the
connection. A single certificate can be used for multiple TLDs.
**1.4 Locks and statuses: do lock and status rules follow RFC specifications?**
**1.9 Locks and statuses: will lock and status rules follow RFC
specifications?**
Yes, lock and status rules follow RFC specifications.
Yes, lock and status rules will follow RFC specifications.
**1.5 How can we receive the registry zone file (SFTP, web-based download)?**
**1.10 How can we receive the registry zone file (SFTP, web-based download)?**
The zone file is available by requesting through ICANNs [Centralized Zone Data
Service](https://czds.icann.org).
The zone file will be available by requesting it through ICANNs
[Centralized Zone Data Service](https://czds.icann.org).
## Domain names
**2.1 Are second level IDNs supported?**
For open TLDs launched by Registry, Latin script (which includes all Spanish
alphabet characters) and the Kanji, Hiragana, and Katakana scripts are supported
at the second level. Please refer to the IDN tables on [IANA
website](http://www.iana.org/domains/idn-tables) for the list of IDNs supported
for each specific TLD.
All open TLDs launched by Google Registry support Latin script (which includes
all Spanish alphabet characters) at the second level. The following TLDs also
support the Kanji, Hiragana, and Katakana scripts: .app, .boo, .channel, .day,
.dev, .how, .new, .page, .rsvp, .soy, and .みんな . Please refer to the IDN tables
on the [IANA website](https://www.iana.org/domains/idn-tables).
**2.2 Are variants of the domain name allowed?**
@@ -60,16 +95,28 @@ we currently support (Japanese and Latin) do not have variants.
No, the restrictions for an IDN domain are the same as those for an ASCII
domain.
**2.4 Is NDN (for example, 1234.tld) allowed for registration?**
**2.4 Are domains on ICANNs collision list available for application or
registration under Sunrise, Landrush, and General Launch?**
The controlled interruption period for our available open TLDs has ended.
Collision names for .app, .HOW, .MINNA and .SOY are currently available for
registration.
**2.5 Is NDN (for example, 1234.tld) allowed for registration?**
Yes. Numeric Domain Names are allowed.
**2.5 What are the restrictions on the length of a domain?**
**2.6 What are the restrictions on the length of a domain?**
A second level domain can be any length up to 15 characters if it contains any
Japanese codepoints, or 63 characters if it only contains ASCII or Latin script
codepoints.
**2.7 Is Google Registry MIIT Accredited?**
Google Registry does not have, nor intend to pursue at this time, MIIT
accreditation.
## DNSSEC
**3.1 What version of DNSSEC is being implemented? Will it be supported at
No, DNSSEC nameservers are not pre-checked for DNSSEC data.
**3.4 What are the valid algorithm values for a DS record?**
We do not validate DS records, so any value is accepted.
Algorithm values 0-3, 5-8, 10, 12-16, and 252-253 are supported. Digest values 1
(SHA-1), 2 (SHA-256), and 4 (SHA-384) are supported.
**3.5 Is the maxSigLife attribute enabled?**
@@ -96,13 +144,13 @@ No, the maxSigLife attribute is not supported.
**3.6 What is the maximum number of DNSSEC records allowed per domain?**
A maximum of eight DNSSEC records are allowed per domain.
Eight is the maximum number of DNSSEC records allowed per domain.
## Nameservers
**4.1 Do you support host objects or host attributes for the domain:ns tag?**
Registry supports host objects. Host attributes are not supported.
Google Registry supports host objects. Host attributes are not supported.
**4.2 Are there any restrictions on the length of the host?**
@@ -154,77 +202,66 @@ the same domain?**
Yes, duplicate IP addresses can be listed between different host objects for the
same domain.
**4.12 How many nameservers can be set on a domain?**
**4.12 Will the registration of nameservers be charged?**
No, there is no charge for the registration of nameservers.
**4.13 Will the update of nameservers be charged?**
No, nameservers can be updated free of charge.
**4.14 How many nameservers can be set on a domain?**
Thirteen nameservers can be set on a domain.
**4.13 Do you allow the usage of glue nameservers?**
**4.15 Do you allow the usage of glue nameservers?**
We require IP address information for nameservers subordinate to a TLD, and do
not allow it otherwise.
**4.14 What is your procedure for the handling of Orphan Glue Records?**
**4.16 What is your procedure for the handling of Orphan Glue Records?**
We do not have a procedure for the handling of Orphan Glue records because we
will not have Orphan Glue Records. When a host object is no longer referred to
by any domain, its glue is removed.
**4.15 What are the authoritative domain name servers for Registry TLDs?**
**4.17 What are the authoritative domain name servers for Registry TLDs?**
*[ The answer to this question will depend on your DNS implementation. ]*
## Contacts
**5.1 What contacts are required to register a domain?**
As of the beginning of 2026, the registry has transitioned to the Minimum Data
Set, also known as a "thin registry". This means that contact information is not
stored, including registrant information.
A registrant, administrative, and technical contact are required to register a
domain. A billing contact is not required. An abuse contact will be required
beginning in 2017, as per ICANN directive. All contacts may point at the same
contact ID. Note that multiple contacts of the same type are not allowed.
## WHOIS / RDAP
**5.2 Can contact information be updated at anytime without a fee?**
**6.1 Is the WHOIS protocol supported?**
Yes, contact information can be updated at any time without a fee.
No. We have discontinued support for the legacy WHOIS protocol (Port 43) in
favor of RDAP (Registration Data Access Protocol). Please update your systems to
query our RDAP service.
**5.3 Are proxy contacts allowed on the domain?**
**6.2: What is the base URL for RDAP requests?**
Yes, proxy contacts are allowed on the domain.
**5.4 Do contact attributes follow the RFC standard?**
Yes, contact attributes follow the RFC standard.
**5.5 Are ISO country codes used?**
Yes, ISO country codes are used.
**5.6 Is a transfer of Contacts possible?**
Yes, contacts may be transferred.
## WHOIS
**6.1 What is the URL and port number for WHOIS requests?**
*[ The answer to this question will depend on your WHOIS proxy implementation.
*[ The answer to this question will depend on your implementation. Here is
ours: ]*
**6.2 Does your policy allow WHOIS Privacy?**
The base URL for RDAP requests is https://pubapi.registry.google/rdap/.
Whois Privacy is allowed.
**6.3: Is any authentication required for RDAP requests?**
**6.3 Is a Bulk WHOIS available?**
No, there is no authentication process currently implemented for our RDAP
server.
No, Bulk WHOIS is not available.
**6.4: What will be the output of RDAP requests?**
**6.4 What are the WHOIS rate limitations?**
Responses from the RDAP server will be in JSON format, conforming to STD 95.
*[ The answer to this question will depend on your WHOIS proxy implementation.
]*
**6.5 How often is RDAP information updated?**
**6.5 How often is WHOIS information updated?**
WHOIS is updated in near real time.
RDAP is updated in near real time.
## EPP - General
@@ -232,70 +269,70 @@ WHOIS is updated in near real time.
*[ The answer to this question will depend on your EPP proxy implementation. ]*
**7.2 Will you provide a standard Pool and Batch account (e.g. as VeriSign)?**
**7.2 What version of EPP is supported?**
The registry supports EPP version 1.0.
**7.3 Will you provide a standard Pool and Batch account (e.g. as VeriSign)?**
No, we will not provide a standard Pool and Batch account.
**7.3 Does the registry support thick domain registrations?**
**7.4 Does the registry support thick domain registrations?**
Yes, Registry supports thick domain registrations.
No, we have moved to the Minimum Data Set allowed by ICANNs Registration Data
Policy and no longer store any contact information.
**7.4 What EPP commands exist to retrieve the account balance of the
**7.5 What EPP commands exist to retrieve the account balance of the
registrar?**
None at the current time.
**7.5 Are there any restrictions on the volume of commands allowed during a
**7.6 Are there any restrictions on the volume of commands allowed during a
session?**
No, there are no restrictions on the volume of commands allowed during a session.
**7.6 Are you using special extensions for your EPP API?**
**7.7 Are you using special extensions for your EPP API?**
[Launch Phase
Mapping](http://tools.ietf.org/html/draft-ietf-eppext-launchphase-01) and
Registry Fee Extension (versions 0.6 and 0.11, as described below) are
supported.
[Launch Phase Mapping](http://tools.ietf.org/html/draft-ietf-eppext-launchphase-01)
and Registry Fee Extension (version 1.0, as described below) are supported.
**7.7 Which domain status are allowed for the domains?**
**7.8 Which domain status are allowed for the domains?**
Standard EPP statuses are allowed.
**7.8 Do you use a real-time or non real-time registration system?**
**7.9 Do you use a real-time or non real-time registration system?**
Registry uses a real-time registration system.
**7.9 What are your connection policies to the registry system (e.g., how many
**7.10 What are your connection policies to the registry system (e.g., how many
connections are allowed)?**
*[ The answer to this question will depend on your App Engine configuration. ]*
*[ The answer to this question will depend on your deployment configuration. ]*
**7.10 Are you using a Shared Registry System or a Dedicated Registry System
**7.11 Are you using a Shared Registry System or a Dedicated Registry System
(separate EPP servers for each TLD)?**
We have a shared registry system for EPP, with a shared namespace across all
supported TLDs. Contacts and hosts are shared across all TLDs; for instance, the
same contact can be used for all of a registrar's domains in the system.
supported TLDs.
**7.11 If using a DRS, are login credentials, IP allow listing, etc. configured
**7.12 If using a DRS, are login credentials, IP allow listing, etc. configured
separately or will these be the same for all TLDs in your system?**
These will be the same for all TLDs, because we are a shared registry system.
There is a single login regardless of TLD, and the same login session can be
used to send EPP commands for all TLDs.
These will be the same for all TLDs.
**7.12 What types of signed marks do you accept for sunrise applications?**
**7.13 What types of signed marks do you accept for sunrise applications?**
We only accept encoded signed marks that are signed by the TMCH. Non-encoded
signed marks are not supported.
**7.13 Where do I get encoded signed marks to use during the OT&E process?**
**7.14 Where do I get encoded signed marks to use during the OT&E process?**
You can use any of the active test SMDs provided by ICANN, which can be found by
following the link in [this ICANN PDF
document](http://newgtlds.icann.org/en/about/trademark-clearinghouse/smd-test-repository-02oct13-en.pdf).
following the link in
[this ICANN PDF document](http://newgtlds.icann.org/en/about/trademark-clearinghouse/smd-test-repository-02oct13-en.pdf).
**7.14 How do I specify the EPP launch phases (i.e. Sunrise and Landrush)?**
**7.15 How do I specify the EPP launch phases (i.e. Sunrise and Landrush)?**
For applications during the Sunrise phase, the launch phase should be specified
as follows:
@@ -307,7 +344,7 @@ as follows:
`<launch:phase>landrush</launch:phase>`
**7.15 Do you accept domain labels with upper-case characters?**
**7.16 Do you accept domain labels with upper-case characters?**
No, the registry will reject domain labels with any uppercase characters. While
DNS labels are inherently case-insensitive
@@ -315,20 +352,20 @@ DNS labels are inherently case-insensitive
canonicalize to lowercase to avoid confusion matching up labels on EPP responses
and poll messages.
**7.16 When should I run a claims check?**
**7.17 When should I run a claims check?**
Claims checks are required during the General Availability and Landrush periods
for domains matching a label existing in the TMCH claims database. Otherwise,
the registration/application will be rejected. Claims checks are not allowed
during Sunrise.
**7.17 How can I get the claims notice from TMDB using the TCNID?**
**7.18 How can I get the claims notice from TMDB using the TCNID?**
Section 3.1 of [this ICANN PDF
document](http://newgtlds.icann.org/en/about/trademark-clearinghouse/scsvcs/db-registrar-06dec13-en.pdf)
Section 3.1 of
[this ICANN PDF document](http://newgtlds.icann.org/en/about/trademark-clearinghouse/scsvcs/db-registrar-06dec13-en.pdf)
provides an overview of how you can access the TMDB system.
**7.18 What happens if I send an explicit renew command during the auto-renew
**7.19 What happens if I send an explicit renew command during the auto-renew
grace period?**
If an explicit renewal command is sent through during the auto-renew grace
@@ -338,7 +375,7 @@ expiration date (not overrule the auto-renew).
Note however that this is not the case for transfers; a transfer during the
auto-renew grace period overrules the auto-renew.
**7.19 What testing environments are available to registrars on sandbox?**
**7.20 What testing environments are available to registrars on sandbox?**
In addition to the existing registrar-specific OTE EPP accounts and TLDs, we
provide a production-like development environment. All registrars are able to
@@ -350,28 +387,35 @@ These development TLDs can be accessed at:
* EPP hostname: *[ this depends on your EPP proxy implementation ]*
* Using OTE EPP accounts: `<registrarid>-3` and `<registrarid>-4`
**7.20 When using the Verisign EPP Tool to test access to the registry, I see
an "Error Reading Server Greeting" message.**
These TLDs will be shared across all onboarded registrars and configured with
similar settings to the respective TLDs in the production environment to enable
registrars to perform any required tests.
This is usually the result of the connection being terminated by the server
because no security certificate is present. The EPP Tool's Getting Started Guide
describes how to configure the connection profile to include a certificate using
the ssl.\* parameters. A certificate is required in order to connect to
the registry.
**7.21 Do you support in-band registrar password update (updating passwords
within an EPP command)?**
No, the registrar will need to contact the registry directly to update a
password.
**7.22 What happens to a deleted domains expiration date when it is restored?**
Successful restore requests reinstate the domains original expiration date,
unless the domain has already expired. Expired domain restores include a
mandatory 1 year renewal (billed separately) applied to the original expiration
date.
**7.23 What is the maximum number of objects that can be checked with a single
<domain:check> command?**
There is a maximum of 50 objects that can be checked per <domain:check> command.
## EPP - Fee extension
**8.1 Which version of the Fee Extension draft do you support?**
**8.1 Which version of the Fee Extension do you support?**
We currently support two versions of the Registry Fee Extension:
We currently support the Fee Extension version 1.0:
[urn:ietf:params:xml:ns:fee-0.6](https://tools.ietf.org/html/draft-brown-epp-fees-03)
[urn:ietf:params:xml:ns:fee-0.11](https://tools.ietf.org/html/draft-ietf-regext-epp-fees-00)
Note that the version of the specification document is not the same as the
version of the fee extension URI itself. Also, version 0.11 is still in the
process of being finalized, so for the moment, version 0.6 is to be preferred.
[urn:ietf:params:xml:ns:epp:fee-1.0](https://datatracker.ietf.org/doc/rfc8748/)
**8.2 Where is the Fee Extension supported?**
@@ -382,7 +426,7 @@ environment.
* Declare the fee extension in the `<login>` command:
`<extURI>urn:ietf:params:xml:ns:fee-0.6</extURI>`
`<extURI>urn:ietf:params:xml:ns:epp:fee-1.0</extURI>`
* Use of the fee extension is optional for EPP query commands. If the client
request includes the fee extension, fee information will be included in the
@@ -458,8 +502,7 @@ command phases or subphases.
include fee and claim extensions in the same `<domain:check>` command?**
Fee and claim checks are not allowed in the same `<domain:check>` command. The
[Launch
Phase](http://tools.ietf.org/html/draft-tan-epp-launchphase-12#section-3.1)
[Launch Phase](http://tools.ietf.org/html/draft-tan-epp-launchphase-12#section-3.1)
extension defines two types of domain checks: availability checks and claim
checks. The `<fee:check>` command is only allowed in availability check
commands.
@@ -470,10 +513,25 @@ commands.
* Renew - 5 days
* Transfer - 5 days
* Pending Delete - 5 days
* Sunrise and Landrush Add/Drop - 30 days
* Sunrise Add/Drop - 30 days
* Redemption - 30 days
* Auto-Renew - 45 days
**8.13 How do I use the fee extension during the Early Access Period?**
The use of the fee extension is mandatory in EAP for all registrations. You will
need to specify two items in <domain:create> requests, one for the normal price,
one for the Early Access Fee. These should be specified as in the following
example:
```
<fee:create xmlns:fee="urn:ietf:params:xml:ns:fee-0.6">
<fee:currency>USD</fee:currency>
<fee:fee description="create">70</fee:fee>
<fee:fee description="Early Access Period">80.00</fee:fee>
</fee:create>
```
## Security
*[ The answers in this section depend on your EPP proxy implementation. These
@@ -489,10 +547,26 @@ support console to manage the IP allow list for your production account.
**9.2 What SSL certificates will you accept for EPP connections?**
We will accept any SSL certificate. You will be asked to submit your certificate
for allow-listing during the onboarding process. After completion of the
onboarding process, you can use the support console to manage the certificate
for your production account.
You must connect via TLS v1.2 or higher. All TLS v1.3 cipher suites are
supported. For TLS v1.2, only the following cipher suites are supported:
* TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
* TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
* TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
* TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
In addition, only RSA certificates with a public key length of 2048 bits or
above, and ECDSA certificates using secp256r1 and secp384r1 curves are
supported. Certificates must be valid at the time of the connection.
Certificates issued on or after 2020-09-01 must have a validity period of 398 or
fewer days. Certificates issued before 2020-09-01 must have a validity period of
825 or fewer days.
You will be asked to submit your certificate for allowlisting during the
onboarding process. After completion of the onboarding process, you can use the
support console to manage the certificate for your production account.
**9.3 Is the use of SNI (Server Name Indication) required when connecting to the
EPP endpoint?**
@@ -515,11 +589,20 @@ following code snippet for illustration:
SSLContext sslContext = SSLContext.getInstance("TLSv1");
SSLSocketFactory sslSocketFactory = sslContext.getSocketFactory();
SSLSocket sslSocket = (SSLSocket) sslSocketFactory.createSocket();
if (sslSocket instanceof sun.security.ssl.SSLSocketImpl) {
((sun.security.ssl.SSLSocketImpl) sslSocket).setHost(server);
if(sslSocket instanceof sun.security.ssl.SSLSocketImpl){
((sun.security.ssl.SSLSocketImpl)sslSocket).
setHost(server);
}
socket.connect(new InetSocketAddress(server, port), timeout);
socket.
connect(new InetSocketAddress(server, port),timeout);
```
We recommend that registrars use Java7 or later, as they have native SNI
support, unlike previous versions of the JDK.
**9.5 When do I get a username/password for the production EPP system?**
You will receive a username and password upon passing OT&E and creation of your
registrar account.

View File

@@ -50,7 +50,7 @@ npm cache clean -f
npm install -g n
# Retrying because failures are possible for node.js installation. See
# https://github.com/nodejs/build/issues/1993
for i in {1..5}; do n 22.7.0 && break || sleep 15; done
for i in {1..5}; do n 22.12.0 && break || sleep 15; done
# Install gp_dump
apt-get install postgresql-client-17 procps -y